1 //===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements routines for folding instructions into simpler forms
10 // that do not require creating new instructions.  This does constant folding
11 // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
12 // returning a constant ("and i32 %x, 0" -> "0") or an already existing value
13 // ("and i32 %x, %x" -> "%x").  All operands are assumed to have already been
14 // simplified: This is usually true and assuming it simplifies the logic (if
15 // they have not been simplified then results are correct but maybe suboptimal).
16 //
17 //===----------------------------------------------------------------------===//
18 
19 #include "llvm/Analysis/InstructionSimplify.h"
20 
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/Analysis/AliasAnalysis.h"
26 #include "llvm/Analysis/AssumptionCache.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/CmpInstAnalysis.h"
29 #include "llvm/Analysis/ConstantFolding.h"
30 #include "llvm/Analysis/InstSimplifyFolder.h"
31 #include "llvm/Analysis/LoopAnalysisManager.h"
32 #include "llvm/Analysis/MemoryBuiltins.h"
33 #include "llvm/Analysis/OverflowInstAnalysis.h"
34 #include "llvm/Analysis/ValueTracking.h"
35 #include "llvm/Analysis/VectorUtils.h"
36 #include "llvm/IR/ConstantRange.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/GetElementPtrTypeIterator.h"
40 #include "llvm/IR/GlobalAlias.h"
41 #include "llvm/IR/InstrTypes.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/Operator.h"
44 #include "llvm/IR/PatternMatch.h"
45 #include "llvm/IR/ValueHandle.h"
46 #include "llvm/Support/KnownBits.h"
47 #include <algorithm>
48 using namespace llvm;
49 using namespace llvm::PatternMatch;
50 
51 #define DEBUG_TYPE "instsimplify"
52 
53 enum { RecursionLimit = 3 };
54 
55 STATISTIC(NumExpand,  "Number of expansions");
56 STATISTIC(NumReassoc, "Number of reassociations");
57 
58 static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
59 static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
60 static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
61                              const SimplifyQuery &, unsigned);
62 static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
63                             unsigned);
64 static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
65                             const SimplifyQuery &, unsigned);
66 static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
67                               unsigned);
68 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
69                                const SimplifyQuery &Q, unsigned MaxRecurse);
70 static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
71 static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
72 static Value *SimplifyCastInst(unsigned, Value *, Type *,
73                                const SimplifyQuery &, unsigned);
74 static Value *SimplifyGEPInst(Type *, Value *, ArrayRef<Value *>, bool,
75                               const SimplifyQuery &, unsigned);
76 static Value *SimplifySelectInst(Value *, Value *, Value *,
77                                  const SimplifyQuery &, unsigned);
78 
79 static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
80                                      Value *FalseVal) {
81   BinaryOperator::BinaryOps BinOpCode;
82   if (auto *BO = dyn_cast<BinaryOperator>(Cond))
83     BinOpCode = BO->getOpcode();
84   else
85     return nullptr;
86 
87   CmpInst::Predicate ExpectedPred, Pred1, Pred2;
88   if (BinOpCode == BinaryOperator::Or) {
89     ExpectedPred = ICmpInst::ICMP_NE;
90   } else if (BinOpCode == BinaryOperator::And) {
91     ExpectedPred = ICmpInst::ICMP_EQ;
92   } else
93     return nullptr;
94 
95   // %A = icmp eq %TV, %FV
96   // %B = icmp eq %X, %Y (and one of these is a select operand)
97   // %C = and %A, %B
98   // %D = select %C, %TV, %FV
99   // -->
100   // %FV
101 
102   // %A = icmp ne %TV, %FV
103   // %B = icmp ne %X, %Y (and one of these is a select operand)
104   // %C = or %A, %B
105   // %D = select %C, %TV, %FV
106   // -->
107   // %TV
108   Value *X, *Y;
109   if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
110                                       m_Specific(FalseVal)),
111                              m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
112       Pred1 != Pred2 || Pred1 != ExpectedPred)
113     return nullptr;
114 
115   if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
116     return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;
117 
118   return nullptr;
119 }
120 
121 /// For a boolean type or a vector of boolean type, return false or a vector
122 /// with every element false.
123 static Constant *getFalse(Type *Ty) {
124   return ConstantInt::getFalse(Ty);
125 }
126 
127 /// For a boolean type or a vector of boolean type, return true or a vector
128 /// with every element true.
129 static Constant *getTrue(Type *Ty) {
130   return ConstantInt::getTrue(Ty);
131 }
132 
133 /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
134 static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
135                           Value *RHS) {
136   CmpInst *Cmp = dyn_cast<CmpInst>(V);
137   if (!Cmp)
138     return false;
139   CmpInst::Predicate CPred = Cmp->getPredicate();
140   Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
141   if (CPred == Pred && CLHS == LHS && CRHS == RHS)
142     return true;
143   return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
144     CRHS == LHS;
145 }
146 
147 /// Simplify comparison with true or false branch of select:
148 ///  %sel = select i1 %cond, i32 %tv, i32 %fv
149 ///  %cmp = icmp sle i32 %sel, %rhs
150 /// Compose new comparison by substituting %sel with either %tv or %fv
151 /// and see if it simplifies.
152 static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
153                                  Value *RHS, Value *Cond,
154                                  const SimplifyQuery &Q, unsigned MaxRecurse,
155                                  Constant *TrueOrFalse) {
156   Value *SimplifiedCmp = SimplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
157   if (SimplifiedCmp == Cond) {
158     // %cmp simplified to the select condition (%cond).
159     return TrueOrFalse;
160   } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
161     // It didn't simplify. However, if composed comparison is equivalent
162     // to the select condition (%cond) then we can replace it.
163     return TrueOrFalse;
164   }
165   return SimplifiedCmp;
166 }
167 
168 /// Simplify comparison with true branch of select
169 static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
170                                      Value *RHS, Value *Cond,
171                                      const SimplifyQuery &Q,
172                                      unsigned MaxRecurse) {
173   return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
174                             getTrue(Cond->getType()));
175 }
176 
177 /// Simplify comparison with false branch of select
178 static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
179                                       Value *RHS, Value *Cond,
180                                       const SimplifyQuery &Q,
181                                       unsigned MaxRecurse) {
182   return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
183                             getFalse(Cond->getType()));
184 }
185 
186 /// We know comparison with both branches of select can be simplified, but they
187 /// are not equal. This routine handles some logical simplifications.
188 static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
189                                                Value *Cond,
190                                                const SimplifyQuery &Q,
191                                                unsigned MaxRecurse) {
192   // If the false value simplified to false, then the result of the compare
193   // is equal to "Cond && TCmp".  This also catches the case when the false
194   // value simplified to false and the true value to true, returning "Cond".
195   // Folding select to and/or isn't poison-safe in general; impliesPoison
196   // checks whether folding it does not convert a well-defined value into
197   // poison.
198   if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
199     if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
200       return V;
201   // If the true value simplified to true, then the result of the compare
202   // is equal to "Cond || FCmp".
203   if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
204     if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
205       return V;
206   // Finally, if the false value simplified to true and the true value to
207   // false, then the result of the compare is equal to "!Cond".
208   if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
209     if (Value *V = SimplifyXorInst(
210             Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
211       return V;
212   return nullptr;
213 }
214 
215 /// Does the given value dominate the specified phi node?
216 static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
217   Instruction *I = dyn_cast<Instruction>(V);
218   if (!I)
219     // Arguments and constants dominate all instructions.
220     return true;
221 
222   // If we are processing instructions (and/or basic blocks) that have not been
223   // fully added to a function, the parent nodes may still be null. Simply
224   // return the conservative answer in these cases.
225   if (!I->getParent() || !P->getParent() || !I->getFunction())
226     return false;
227 
228   // If we have a DominatorTree then do a precise test.
229   if (DT)
230     return DT->dominates(I, P);
231 
232   // Otherwise, if the instruction is in the entry block and is not an invoke,
233   // then it obviously dominates all phi nodes.
234   if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
235       !isa<CallBrInst>(I))
236     return true;
237 
238   return false;
239 }
240 
241 /// Try to simplify a binary operator of form "V op OtherOp" where V is
242 /// "(B0 opex B1)" by distributing 'op' across 'opex' as
243 /// "(B0 op OtherOp) opex (B1 op OtherOp)".
244 static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
245                           Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
246                           const SimplifyQuery &Q, unsigned MaxRecurse) {
247   auto *B = dyn_cast<BinaryOperator>(V);
248   if (!B || B->getOpcode() != OpcodeToExpand)
249     return nullptr;
250   Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
251   Value *L = SimplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(),
252                            MaxRecurse);
253   if (!L)
254     return nullptr;
255   Value *R = SimplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(),
256                            MaxRecurse);
257   if (!R)
258     return nullptr;
259 
260   // Does the expanded pair of binops simplify to the existing binop?
261   if ((L == B0 && R == B1) ||
262       (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
263     ++NumExpand;
264     return B;
265   }
266 
267   // Otherwise, return "L op' R" if it simplifies.
268   Value *S = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
269   if (!S)
270     return nullptr;
271 
272   ++NumExpand;
273   return S;
274 }
275 
276 /// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
277 /// distributing op over op'.
278 static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode,
279                                      Value *L, Value *R,
280                                      Instruction::BinaryOps OpcodeToExpand,
281                                      const SimplifyQuery &Q,
282                                      unsigned MaxRecurse) {
283   // Recursion is always used, so bail out at once if we already hit the limit.
284   if (!MaxRecurse--)
285     return nullptr;
286 
287   if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
288     return V;
289   if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
290     return V;
291   return nullptr;
292 }
293 
294 /// Generic simplifications for associative binary operations.
295 /// Returns the simpler value, or null if none was found.
296 static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
297                                        Value *LHS, Value *RHS,
298                                        const SimplifyQuery &Q,
299                                        unsigned MaxRecurse) {
300   assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
301 
302   // Recursion is always used, so bail out at once if we already hit the limit.
303   if (!MaxRecurse--)
304     return nullptr;
305 
306   BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
307   BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
308 
309   // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
310   if (Op0 && Op0->getOpcode() == Opcode) {
311     Value *A = Op0->getOperand(0);
312     Value *B = Op0->getOperand(1);
313     Value *C = RHS;
314 
315     // Does "B op C" simplify?
316     if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
317       // It does!  Return "A op V" if it simplifies or is already available.
318       // If V equals B then "A op V" is just the LHS.
319       if (V == B) return LHS;
320       // Otherwise return "A op V" if it simplifies.
321       if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
322         ++NumReassoc;
323         return W;
324       }
325     }
326   }
327 
328   // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
329   if (Op1 && Op1->getOpcode() == Opcode) {
330     Value *A = LHS;
331     Value *B = Op1->getOperand(0);
332     Value *C = Op1->getOperand(1);
333 
334     // Does "A op B" simplify?
335     if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
336       // It does!  Return "V op C" if it simplifies or is already available.
337       // If V equals B then "V op C" is just the RHS.
338       if (V == B) return RHS;
339       // Otherwise return "V op C" if it simplifies.
340       if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
341         ++NumReassoc;
342         return W;
343       }
344     }
345   }
346 
347   // The remaining transforms require commutativity as well as associativity.
348   if (!Instruction::isCommutative(Opcode))
349     return nullptr;
350 
351   // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
352   if (Op0 && Op0->getOpcode() == Opcode) {
353     Value *A = Op0->getOperand(0);
354     Value *B = Op0->getOperand(1);
355     Value *C = RHS;
356 
357     // Does "C op A" simplify?
358     if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
359       // It does!  Return "V op B" if it simplifies or is already available.
360       // If V equals A then "V op B" is just the LHS.
361       if (V == A) return LHS;
362       // Otherwise return "V op B" if it simplifies.
363       if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
364         ++NumReassoc;
365         return W;
366       }
367     }
368   }
369 
370   // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
371   if (Op1 && Op1->getOpcode() == Opcode) {
372     Value *A = LHS;
373     Value *B = Op1->getOperand(0);
374     Value *C = Op1->getOperand(1);
375 
376     // Does "C op A" simplify?
377     if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
378       // It does!  Return "B op V" if it simplifies or is already available.
379       // If V equals C then "B op V" is just the RHS.
380       if (V == C) return RHS;
381       // Otherwise return "B op V" if it simplifies.
382       if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
383         ++NumReassoc;
384         return W;
385       }
386     }
387   }
388 
389   return nullptr;
390 }
391 
392 /// In the case of a binary operation with a select instruction as an operand,
393 /// try to simplify the binop by seeing whether evaluating it on both branches
394 /// of the select results in the same value. Returns the common value if so,
395 /// otherwise returns null.
396 static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
397                                     Value *RHS, const SimplifyQuery &Q,
398                                     unsigned MaxRecurse) {
399   // Recursion is always used, so bail out at once if we already hit the limit.
400   if (!MaxRecurse--)
401     return nullptr;
402 
403   SelectInst *SI;
404   if (isa<SelectInst>(LHS)) {
405     SI = cast<SelectInst>(LHS);
406   } else {
407     assert(isa<SelectInst>(RHS) && "No select instruction operand!");
408     SI = cast<SelectInst>(RHS);
409   }
410 
411   // Evaluate the BinOp on the true and false branches of the select.
412   Value *TV;
413   Value *FV;
414   if (SI == LHS) {
415     TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
416     FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
417   } else {
418     TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
419     FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
420   }
421 
422   // If they simplified to the same value, then return the common value.
423   // If they both failed to simplify then return null.
424   if (TV == FV)
425     return TV;
426 
427   // If one branch simplified to undef, return the other one.
428   if (TV && Q.isUndefValue(TV))
429     return FV;
430   if (FV && Q.isUndefValue(FV))
431     return TV;
432 
433   // If applying the operation did not change the true and false select values,
434   // then the result of the binop is the select itself.
435   if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
436     return SI;
437 
438   // If one branch simplified and the other did not, and the simplified
439   // value is equal to the unsimplified one, return the simplified value.
440   // For example, select (cond, X, X & Z) & Z -> X & Z.
441   if ((FV && !TV) || (TV && !FV)) {
442     // Check that the simplified value has the form "X op Y" where "op" is the
443     // same as the original operation.
444     Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
445     if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
446       // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
447       // We already know that "op" is the same as for the simplified value.  See
448       // if the operands match too.  If so, return the simplified value.
449       Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
450       Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
451       Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
452       if (Simplified->getOperand(0) == UnsimplifiedLHS &&
453           Simplified->getOperand(1) == UnsimplifiedRHS)
454         return Simplified;
455       if (Simplified->isCommutative() &&
456           Simplified->getOperand(1) == UnsimplifiedLHS &&
457           Simplified->getOperand(0) == UnsimplifiedRHS)
458         return Simplified;
459     }
460   }
461 
462   return nullptr;
463 }
464 
465 /// In the case of a comparison with a select instruction, try to simplify the
466 /// comparison by seeing whether both branches of the select result in the same
467 /// value. Returns the common value if so, otherwise returns null.
468 /// For example, if we have:
469 ///  %tmp = select i1 %cmp, i32 1, i32 2
470 ///  %cmp1 = icmp sle i32 %tmp, 3
471 /// We can simplify %cmp1 to true, because both branches of select are
472 /// less than 3. We compose new comparison by substituting %tmp with both
473 /// branches of select and see if it can be simplified.
474 static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
475                                   Value *RHS, const SimplifyQuery &Q,
476                                   unsigned MaxRecurse) {
477   // Recursion is always used, so bail out at once if we already hit the limit.
478   if (!MaxRecurse--)
479     return nullptr;
480 
481   // Make sure the select is on the LHS.
482   if (!isa<SelectInst>(LHS)) {
483     std::swap(LHS, RHS);
484     Pred = CmpInst::getSwappedPredicate(Pred);
485   }
486   assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
487   SelectInst *SI = cast<SelectInst>(LHS);
488   Value *Cond = SI->getCondition();
489   Value *TV = SI->getTrueValue();
490   Value *FV = SI->getFalseValue();
491 
492   // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
493   // Does "cmp TV, RHS" simplify?
494   Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
495   if (!TCmp)
496     return nullptr;
497 
498   // Does "cmp FV, RHS" simplify?
499   Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
500   if (!FCmp)
501     return nullptr;
502 
503   // If both sides simplified to the same value, then use it as the result of
504   // the original comparison.
505   if (TCmp == FCmp)
506     return TCmp;
507 
508   // The remaining cases only make sense if the select condition has the same
509   // type as the result of the comparison, so bail out if this is not so.
510   if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
511     return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);
512 
513   return nullptr;
514 }
515 
516 /// In the case of a binary operation with an operand that is a PHI instruction,
517 /// try to simplify the binop by seeing whether evaluating it on the incoming
518 /// phi values yields the same result for every value. If so returns the common
519 /// value, otherwise returns null.
520 static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
521                                  Value *RHS, const SimplifyQuery &Q,
522                                  unsigned MaxRecurse) {
523   // Recursion is always used, so bail out at once if we already hit the limit.
524   if (!MaxRecurse--)
525     return nullptr;
526 
527   PHINode *PI;
528   if (isa<PHINode>(LHS)) {
529     PI = cast<PHINode>(LHS);
530     // Bail out if RHS and the phi may be mutually interdependent due to a loop.
531     if (!valueDominatesPHI(RHS, PI, Q.DT))
532       return nullptr;
533   } else {
534     assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
535     PI = cast<PHINode>(RHS);
536     // Bail out if LHS and the phi may be mutually interdependent due to a loop.
537     if (!valueDominatesPHI(LHS, PI, Q.DT))
538       return nullptr;
539   }
540 
541   // Evaluate the BinOp on the incoming phi values.
542   Value *CommonValue = nullptr;
543   for (Value *Incoming : PI->incoming_values()) {
544     // If the incoming value is the phi node itself, it can safely be skipped.
545     if (Incoming == PI) continue;
546     Value *V = PI == LHS ?
547       SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
548       SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
549     // If the operation failed to simplify, or simplified to a different value
550     // to previously, then give up.
551     if (!V || (CommonValue && V != CommonValue))
552       return nullptr;
553     CommonValue = V;
554   }
555 
556   return CommonValue;
557 }
558 
559 /// In the case of a comparison with a PHI instruction, try to simplify the
560 /// comparison by seeing whether comparing with all of the incoming phi values
561 /// yields the same result every time. If so returns the common result,
562 /// otherwise returns null.
563 static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
564                                const SimplifyQuery &Q, unsigned MaxRecurse) {
565   // Recursion is always used, so bail out at once if we already hit the limit.
566   if (!MaxRecurse--)
567     return nullptr;
568 
569   // Make sure the phi is on the LHS.
570   if (!isa<PHINode>(LHS)) {
571     std::swap(LHS, RHS);
572     Pred = CmpInst::getSwappedPredicate(Pred);
573   }
574   assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
575   PHINode *PI = cast<PHINode>(LHS);
576 
577   // Bail out if RHS and the phi may be mutually interdependent due to a loop.
578   if (!valueDominatesPHI(RHS, PI, Q.DT))
579     return nullptr;
580 
581   // Evaluate the BinOp on the incoming phi values.
582   Value *CommonValue = nullptr;
583   for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
584     Value *Incoming = PI->getIncomingValue(u);
585     Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
586     // If the incoming value is the phi node itself, it can safely be skipped.
587     if (Incoming == PI) continue;
588     // Change the context instruction to the "edge" that flows into the phi.
589     // This is important because that is where incoming is actually "evaluated"
590     // even though it is used later somewhere else.
591     Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
592                                MaxRecurse);
593     // If the operation failed to simplify, or simplified to a different value
594     // to previously, then give up.
595     if (!V || (CommonValue && V != CommonValue))
596       return nullptr;
597     CommonValue = V;
598   }
599 
600   return CommonValue;
601 }
602 
603 static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
604                                        Value *&Op0, Value *&Op1,
605                                        const SimplifyQuery &Q) {
606   if (auto *CLHS = dyn_cast<Constant>(Op0)) {
607     if (auto *CRHS = dyn_cast<Constant>(Op1))
608       return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
609 
610     // Canonicalize the constant to the RHS if this is a commutative operation.
611     if (Instruction::isCommutative(Opcode))
612       std::swap(Op0, Op1);
613   }
614   return nullptr;
615 }
616 
617 /// Given operands for an Add, see if we can fold the result.
618 /// If not, this returns null.
619 static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
620                               const SimplifyQuery &Q, unsigned MaxRecurse) {
621   if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
622     return C;
623 
624   // X + poison -> poison
625   if (isa<PoisonValue>(Op1))
626     return Op1;
627 
628   // X + undef -> undef
629   if (Q.isUndefValue(Op1))
630     return Op1;
631 
632   // X + 0 -> X
633   if (match(Op1, m_Zero()))
634     return Op0;
635 
636   // If two operands are negative, return 0.
637   if (isKnownNegation(Op0, Op1))
638     return Constant::getNullValue(Op0->getType());
639 
640   // X + (Y - X) -> Y
641   // (Y - X) + X -> Y
642   // Eg: X + -X -> 0
643   Value *Y = nullptr;
644   if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
645       match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
646     return Y;
647 
648   // X + ~X -> -1   since   ~X = -X-1
649   Type *Ty = Op0->getType();
650   if (match(Op0, m_Not(m_Specific(Op1))) ||
651       match(Op1, m_Not(m_Specific(Op0))))
652     return Constant::getAllOnesValue(Ty);
653 
654   // add nsw/nuw (xor Y, signmask), signmask --> Y
655   // The no-wrapping add guarantees that the top bit will be set by the add.
656   // Therefore, the xor must be clearing the already set sign bit of Y.
657   if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
658       match(Op0, m_Xor(m_Value(Y), m_SignMask())))
659     return Y;
660 
661   // add nuw %x, -1  ->  -1, because %x can only be 0.
662   if (IsNUW && match(Op1, m_AllOnes()))
663     return Op1; // Which is -1.
664 
665   /// i1 add -> xor.
666   if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
667     if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
668       return V;
669 
670   // Try some generic simplifications for associative operations.
671   if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
672                                           MaxRecurse))
673     return V;
674 
675   // Threading Add over selects and phi nodes is pointless, so don't bother.
676   // Threading over the select in "A + select(cond, B, C)" means evaluating
677   // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
678   // only if B and C are equal.  If B and C are equal then (since we assume
679   // that operands have already been simplified) "select(cond, B, C)" should
680   // have been simplified to the common value of B and C already.  Analysing
681   // "A+B" and "A+C" thus gains nothing, but costs compile time.  Similarly
682   // for threading over phi nodes.
683 
684   return nullptr;
685 }
686 
687 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
688                              const SimplifyQuery &Query) {
689   return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
690 }
691 
692 /// Compute the base pointer and cumulative constant offsets for V.
693 ///
694 /// This strips all constant offsets off of V, leaving it the base pointer, and
695 /// accumulates the total constant offset applied in the returned constant.
696 /// It returns zero if there are no constant offsets applied.
697 ///
698 /// This is very similar to stripAndAccumulateConstantOffsets(), except it
699 /// normalizes the offset bitwidth to the stripped pointer type, not the
700 /// original pointer type.
701 static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
702                                             bool AllowNonInbounds = false) {
703   assert(V->getType()->isPtrOrPtrVectorTy());
704 
705   APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
706   V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
707   // As that strip may trace through `addrspacecast`, need to sext or trunc
708   // the offset calculated.
709   return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
710 }
711 
712 /// Compute the constant difference between two pointer values.
713 /// If the difference is not a constant, returns zero.
714 static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
715                                           Value *RHS) {
716   APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
717   APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
718 
719   // If LHS and RHS are not related via constant offsets to the same base
720   // value, there is nothing we can do here.
721   if (LHS != RHS)
722     return nullptr;
723 
724   // Otherwise, the difference of LHS - RHS can be computed as:
725   //    LHS - RHS
726   //  = (LHSOffset + Base) - (RHSOffset + Base)
727   //  = LHSOffset - RHSOffset
728   Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
729   if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
730     Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
731   return Res;
732 }
733 
734 /// Given operands for a Sub, see if we can fold the result.
735 /// If not, this returns null.
736 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
737                               const SimplifyQuery &Q, unsigned MaxRecurse) {
738   if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
739     return C;
740 
741   // X - poison -> poison
742   // poison - X -> poison
743   if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
744     return PoisonValue::get(Op0->getType());
745 
746   // X - undef -> undef
747   // undef - X -> undef
748   if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
749     return UndefValue::get(Op0->getType());
750 
751   // X - 0 -> X
752   if (match(Op1, m_Zero()))
753     return Op0;
754 
755   // X - X -> 0
756   if (Op0 == Op1)
757     return Constant::getNullValue(Op0->getType());
758 
759   // Is this a negation?
760   if (match(Op0, m_Zero())) {
761     // 0 - X -> 0 if the sub is NUW.
762     if (isNUW)
763       return Constant::getNullValue(Op0->getType());
764 
765     KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
766     if (Known.Zero.isMaxSignedValue()) {
767       // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
768       // Op1 must be 0 because negating the minimum signed value is undefined.
769       if (isNSW)
770         return Constant::getNullValue(Op0->getType());
771 
772       // 0 - X -> X if X is 0 or the minimum signed value.
773       return Op1;
774     }
775   }
776 
777   // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
778   // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
779   Value *X = nullptr, *Y = nullptr, *Z = Op1;
780   if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
781     // See if "V === Y - Z" simplifies.
782     if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
783       // It does!  Now see if "X + V" simplifies.
784       if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
785         // It does, we successfully reassociated!
786         ++NumReassoc;
787         return W;
788       }
789     // See if "V === X - Z" simplifies.
790     if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
791       // It does!  Now see if "Y + V" simplifies.
792       if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
793         // It does, we successfully reassociated!
794         ++NumReassoc;
795         return W;
796       }
797   }
798 
799   // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
800   // For example, X - (X + 1) -> -1
801   X = Op0;
802   if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
803     // See if "V === X - Y" simplifies.
804     if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
805       // It does!  Now see if "V - Z" simplifies.
806       if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
807         // It does, we successfully reassociated!
808         ++NumReassoc;
809         return W;
810       }
811     // See if "V === X - Z" simplifies.
812     if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
813       // It does!  Now see if "V - Y" simplifies.
814       if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
815         // It does, we successfully reassociated!
816         ++NumReassoc;
817         return W;
818       }
819   }
820 
821   // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
822   // For example, X - (X - Y) -> Y.
823   Z = Op0;
824   if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
825     // See if "V === Z - X" simplifies.
826     if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
827       // It does!  Now see if "V + Y" simplifies.
828       if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
829         // It does, we successfully reassociated!
830         ++NumReassoc;
831         return W;
832       }
833 
834   // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
835   if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
836       match(Op1, m_Trunc(m_Value(Y))))
837     if (X->getType() == Y->getType())
838       // See if "V === X - Y" simplifies.
839       if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
840         // It does!  Now see if "trunc V" simplifies.
841         if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
842                                         Q, MaxRecurse - 1))
843           // It does, return the simplified "trunc V".
844           return W;
845 
846   // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
847   if (match(Op0, m_PtrToInt(m_Value(X))) &&
848       match(Op1, m_PtrToInt(m_Value(Y))))
849     if (Constant *Result = computePointerDifference(Q.DL, X, Y))
850       return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
851 
852   // i1 sub -> xor.
853   if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
854     if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
855       return V;
856 
857   // Threading Sub over selects and phi nodes is pointless, so don't bother.
858   // Threading over the select in "A - select(cond, B, C)" means evaluating
859   // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
860   // only if B and C are equal.  If B and C are equal then (since we assume
861   // that operands have already been simplified) "select(cond, B, C)" should
862   // have been simplified to the common value of B and C already.  Analysing
863   // "A-B" and "A-C" thus gains nothing, but costs compile time.  Similarly
864   // for threading over phi nodes.
865 
866   return nullptr;
867 }
868 
869 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
870                              const SimplifyQuery &Q) {
871   return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
872 }
873 
874 /// Given operands for a Mul, see if we can fold the result.
875 /// If not, this returns null.
876 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
877                               unsigned MaxRecurse) {
878   if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
879     return C;
880 
881   // X * poison -> poison
882   if (isa<PoisonValue>(Op1))
883     return Op1;
884 
885   // X * undef -> 0
886   // X * 0 -> 0
887   if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
888     return Constant::getNullValue(Op0->getType());
889 
890   // X * 1 -> X
891   if (match(Op1, m_One()))
892     return Op0;
893 
894   // (X / Y) * Y -> X if the division is exact.
895   Value *X = nullptr;
896   if (Q.IIQ.UseInstrInfo &&
897       (match(Op0,
898              m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
899        match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
900     return X;
901 
902   // i1 mul -> and.
903   if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
904     if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
905       return V;
906 
907   // Try some generic simplifications for associative operations.
908   if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
909                                           MaxRecurse))
910     return V;
911 
912   // Mul distributes over Add. Try some generic simplifications based on this.
913   if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
914                                         Instruction::Add, Q, MaxRecurse))
915     return V;
916 
917   // If the operation is with the result of a select instruction, check whether
918   // operating on either branch of the select always yields the same value.
919   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
920     if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
921                                          MaxRecurse))
922       return V;
923 
924   // If the operation is with the result of a phi instruction, check whether
925   // operating on all incoming values of the phi always yields the same value.
926   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
927     if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
928                                       MaxRecurse))
929       return V;
930 
931   return nullptr;
932 }
933 
934 Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
935   return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
936 }
937 
938 /// Check for common or similar folds of integer division or integer remainder.
939 /// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
940 static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
941                              Value *Op1, const SimplifyQuery &Q) {
942   bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
943   bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
944 
945   Type *Ty = Op0->getType();
946 
947   // X / undef -> poison
948   // X % undef -> poison
949   if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
950     return PoisonValue::get(Ty);
951 
952   // X / 0 -> poison
953   // X % 0 -> poison
954   // We don't need to preserve faults!
955   if (match(Op1, m_Zero()))
956     return PoisonValue::get(Ty);
957 
958   // If any element of a constant divisor fixed width vector is zero or undef
959   // the behavior is undefined and we can fold the whole op to poison.
960   auto *Op1C = dyn_cast<Constant>(Op1);
961   auto *VTy = dyn_cast<FixedVectorType>(Ty);
962   if (Op1C && VTy) {
963     unsigned NumElts = VTy->getNumElements();
964     for (unsigned i = 0; i != NumElts; ++i) {
965       Constant *Elt = Op1C->getAggregateElement(i);
966       if (Elt && (Elt->isNullValue() || Q.isUndefValue(Elt)))
967         return PoisonValue::get(Ty);
968     }
969   }
970 
971   // poison / X -> poison
972   // poison % X -> poison
973   if (isa<PoisonValue>(Op0))
974     return Op0;
975 
976   // undef / X -> 0
977   // undef % X -> 0
978   if (Q.isUndefValue(Op0))
979     return Constant::getNullValue(Ty);
980 
981   // 0 / X -> 0
982   // 0 % X -> 0
983   if (match(Op0, m_Zero()))
984     return Constant::getNullValue(Op0->getType());
985 
986   // X / X -> 1
987   // X % X -> 0
988   if (Op0 == Op1)
989     return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
990 
991   // X / 1 -> X
992   // X % 1 -> 0
993   // If this is a boolean op (single-bit element type), we can't have
994   // division-by-zero or remainder-by-zero, so assume the divisor is 1.
995   // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
996   Value *X;
997   if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) ||
998       (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
999     return IsDiv ? Op0 : Constant::getNullValue(Ty);
1000 
1001   // If X * Y does not overflow, then:
1002   //   X * Y / Y -> X
1003   //   X * Y % Y -> 0
1004   if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
1005     auto *Mul = cast<OverflowingBinaryOperator>(Op0);
1006     // The multiplication can't overflow if it is defined not to, or if
1007     // X == A / Y for some A.
1008     if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
1009         (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
1010         (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
1011         (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
1012       return IsDiv ? X : Constant::getNullValue(Op0->getType());
1013     }
1014   }
1015 
1016   return nullptr;
1017 }
1018 
1019 /// Given a predicate and two operands, return true if the comparison is true.
1020 /// This is a helper for div/rem simplification where we return some other value
1021 /// when we can prove a relationship between the operands.
1022 static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
1023                        const SimplifyQuery &Q, unsigned MaxRecurse) {
1024   Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
1025   Constant *C = dyn_cast_or_null<Constant>(V);
1026   return (C && C->isAllOnesValue());
1027 }
1028 
1029 /// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
1030 /// to simplify X % Y to X.
1031 static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
1032                       unsigned MaxRecurse, bool IsSigned) {
1033   // Recursion is always used, so bail out at once if we already hit the limit.
1034   if (!MaxRecurse--)
1035     return false;
1036 
1037   if (IsSigned) {
1038     // |X| / |Y| --> 0
1039     //
1040     // We require that 1 operand is a simple constant. That could be extended to
1041     // 2 variables if we computed the sign bit for each.
1042     //
1043     // Make sure that a constant is not the minimum signed value because taking
1044     // the abs() of that is undefined.
1045     Type *Ty = X->getType();
1046     const APInt *C;
1047     if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
1048       // Is the variable divisor magnitude always greater than the constant
1049       // dividend magnitude?
1050       // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
1051       Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
1052       Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
1053       if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
1054           isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
1055         return true;
1056     }
1057     if (match(Y, m_APInt(C))) {
1058       // Special-case: we can't take the abs() of a minimum signed value. If
1059       // that's the divisor, then all we have to do is prove that the dividend
1060       // is also not the minimum signed value.
1061       if (C->isMinSignedValue())
1062         return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);
1063 
1064       // Is the variable dividend magnitude always less than the constant
1065       // divisor magnitude?
1066       // |X| < |C| --> X > -abs(C) and X < abs(C)
1067       Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
1068       Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1069       if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
1070           isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
1071         return true;
1072     }
1073     return false;
1074   }
1075 
1076   // IsSigned == false.
1077 
1078   // Is the unsigned dividend known to be less than a constant divisor?
1079   // TODO: Convert this (and above) to range analysis
1080   //      ("computeConstantRangeIncludingKnownBits")?
1081   const APInt *C;
1082   if (match(Y, m_APInt(C)) &&
1083       computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI, Q.DT).getMaxValue().ult(*C))
1084     return true;
1085 
1086   // Try again for any divisor:
1087   // Is the dividend unsigned less than the divisor?
1088   return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
1089 }
1090 
1091 /// These are simplifications common to SDiv and UDiv.
1092 static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1093                           const SimplifyQuery &Q, unsigned MaxRecurse) {
1094   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1095     return C;
1096 
1097   if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q))
1098     return V;
1099 
1100   bool IsSigned = Opcode == Instruction::SDiv;
1101 
1102   // (X rem Y) / Y -> 0
1103   if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1104       (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1105     return Constant::getNullValue(Op0->getType());
1106 
1107   // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
1108   ConstantInt *C1, *C2;
1109   if (!IsSigned && match(Op0, m_UDiv(m_Value(), m_ConstantInt(C1))) &&
1110       match(Op1, m_ConstantInt(C2))) {
1111     bool Overflow;
1112     (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
1113     if (Overflow)
1114       return Constant::getNullValue(Op0->getType());
1115   }
1116 
1117   // If the operation is with the result of a select instruction, check whether
1118   // operating on either branch of the select always yields the same value.
1119   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1120     if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1121       return V;
1122 
1123   // If the operation is with the result of a phi instruction, check whether
1124   // operating on all incoming values of the phi always yields the same value.
1125   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1126     if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1127       return V;
1128 
1129   if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1130     return Constant::getNullValue(Op0->getType());
1131 
1132   return nullptr;
1133 }
1134 
1135 /// These are simplifications common to SRem and URem.
1136 static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1137                           const SimplifyQuery &Q, unsigned MaxRecurse) {
1138   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1139     return C;
1140 
1141   if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q))
1142     return V;
1143 
1144   // (X % Y) % Y -> X % Y
1145   if ((Opcode == Instruction::SRem &&
1146        match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1147       (Opcode == Instruction::URem &&
1148        match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1149     return Op0;
1150 
1151   // (X << Y) % X -> 0
1152   if (Q.IIQ.UseInstrInfo &&
1153       ((Opcode == Instruction::SRem &&
1154         match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
1155        (Opcode == Instruction::URem &&
1156         match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
1157     return Constant::getNullValue(Op0->getType());
1158 
1159   // If the operation is with the result of a select instruction, check whether
1160   // operating on either branch of the select always yields the same value.
1161   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1162     if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1163       return V;
1164 
1165   // If the operation is with the result of a phi instruction, check whether
1166   // operating on all incoming values of the phi always yields the same value.
1167   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1168     if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1169       return V;
1170 
1171   // If X / Y == 0, then X % Y == X.
1172   if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
1173     return Op0;
1174 
1175   return nullptr;
1176 }
1177 
1178 /// Given operands for an SDiv, see if we can fold the result.
1179 /// If not, this returns null.
1180 static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1181                                unsigned MaxRecurse) {
1182   // If two operands are negated and no signed overflow, return -1.
1183   if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
1184     return Constant::getAllOnesValue(Op0->getType());
1185 
1186   return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse);
1187 }
1188 
1189 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1190   return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
1191 }
1192 
1193 /// Given operands for a UDiv, see if we can fold the result.
1194 /// If not, this returns null.
1195 static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1196                                unsigned MaxRecurse) {
1197   return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse);
1198 }
1199 
1200 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1201   return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
1202 }
1203 
1204 /// Given operands for an SRem, see if we can fold the result.
1205 /// If not, this returns null.
1206 static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1207                                unsigned MaxRecurse) {
1208   // If the divisor is 0, the result is undefined, so assume the divisor is -1.
1209   // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
1210   Value *X;
1211   if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1212     return ConstantInt::getNullValue(Op0->getType());
1213 
1214   // If the two operands are negated, return 0.
1215   if (isKnownNegation(Op0, Op1))
1216     return ConstantInt::getNullValue(Op0->getType());
1217 
1218   return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1219 }
1220 
1221 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1222   return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
1223 }
1224 
1225 /// Given operands for a URem, see if we can fold the result.
1226 /// If not, this returns null.
1227 static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1228                                unsigned MaxRecurse) {
1229   return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1230 }
1231 
1232 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1233   return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
1234 }
1235 
1236 /// Returns true if a shift by \c Amount always yields poison.
1237 static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
1238   Constant *C = dyn_cast<Constant>(Amount);
1239   if (!C)
1240     return false;
1241 
1242   // X shift by undef -> poison because it may shift by the bitwidth.
1243   if (Q.isUndefValue(C))
1244     return true;
1245 
1246   // Shifting by the bitwidth or more is undefined.
1247   if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1248     if (CI->getValue().uge(CI->getType()->getScalarSizeInBits()))
1249       return true;
1250 
1251   // If all lanes of a vector shift are undefined the whole shift is.
1252   if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1253     for (unsigned I = 0,
1254                   E = cast<FixedVectorType>(C->getType())->getNumElements();
1255          I != E; ++I)
1256       if (!isPoisonShift(C->getAggregateElement(I), Q))
1257         return false;
1258     return true;
1259   }
1260 
1261   return false;
1262 }
1263 
1264 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1265 /// If not, this returns null.
1266 static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1267                             Value *Op1, bool IsNSW, const SimplifyQuery &Q,
1268                             unsigned MaxRecurse) {
1269   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1270     return C;
1271 
1272   // poison shift by X -> poison
1273   if (isa<PoisonValue>(Op0))
1274     return Op0;
1275 
1276   // 0 shift by X -> 0
1277   if (match(Op0, m_Zero()))
1278     return Constant::getNullValue(Op0->getType());
1279 
1280   // X shift by 0 -> X
1281   // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1282   // would be poison.
1283   Value *X;
1284   if (match(Op1, m_Zero()) ||
1285       (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1286     return Op0;
1287 
1288   // Fold undefined shifts.
1289   if (isPoisonShift(Op1, Q))
1290     return PoisonValue::get(Op0->getType());
1291 
1292   // If the operation is with the result of a select instruction, check whether
1293   // operating on either branch of the select always yields the same value.
1294   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1295     if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1296       return V;
1297 
1298   // If the operation is with the result of a phi instruction, check whether
1299   // operating on all incoming values of the phi always yields the same value.
1300   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1301     if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1302       return V;
1303 
1304   // If any bits in the shift amount make that value greater than or equal to
1305   // the number of bits in the type, the shift is undefined.
1306   KnownBits KnownAmt = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1307   if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
1308     return PoisonValue::get(Op0->getType());
1309 
1310   // If all valid bits in the shift amount are known zero, the first operand is
1311   // unchanged.
1312   unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
1313   if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
1314     return Op0;
1315 
1316   // Check for nsw shl leading to a poison value.
1317   if (IsNSW) {
1318     assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
1319     KnownBits KnownVal = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1320     KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);
1321 
1322     if (KnownVal.Zero.isSignBitSet())
1323       KnownShl.Zero.setSignBit();
1324     if (KnownVal.One.isSignBitSet())
1325       KnownShl.One.setSignBit();
1326 
1327     if (KnownShl.hasConflict())
1328       return PoisonValue::get(Op0->getType());
1329   }
1330 
1331   return nullptr;
1332 }
1333 
1334 /// Given operands for an Shl, LShr or AShr, see if we can
1335 /// fold the result.  If not, this returns null.
1336 static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1337                                  Value *Op1, bool isExact, const SimplifyQuery &Q,
1338                                  unsigned MaxRecurse) {
1339   if (Value *V =
1340           SimplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
1341     return V;
1342 
1343   // X >> X -> 0
1344   if (Op0 == Op1)
1345     return Constant::getNullValue(Op0->getType());
1346 
1347   // undef >> X -> 0
1348   // undef >> X -> undef (if it's exact)
1349   if (Q.isUndefValue(Op0))
1350     return isExact ? Op0 : Constant::getNullValue(Op0->getType());
1351 
1352   // The low bit cannot be shifted out of an exact shift if it is set.
1353   if (isExact) {
1354     KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
1355     if (Op0Known.One[0])
1356       return Op0;
1357   }
1358 
1359   return nullptr;
1360 }
1361 
1362 /// Given operands for an Shl, see if we can fold the result.
1363 /// If not, this returns null.
1364 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1365                               const SimplifyQuery &Q, unsigned MaxRecurse) {
1366   if (Value *V =
1367           SimplifyShift(Instruction::Shl, Op0, Op1, isNSW, Q, MaxRecurse))
1368     return V;
1369 
1370   // undef << X -> 0
1371   // undef << X -> undef if (if it's NSW/NUW)
1372   if (Q.isUndefValue(Op0))
1373     return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
1374 
1375   // (X >> A) << A -> X
1376   Value *X;
1377   if (Q.IIQ.UseInstrInfo &&
1378       match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1379     return X;
1380 
1381   // shl nuw i8 C, %x  ->  C  iff C has sign bit set.
1382   if (isNUW && match(Op0, m_Negative()))
1383     return Op0;
1384   // NOTE: could use computeKnownBits() / LazyValueInfo,
1385   // but the cost-benefit analysis suggests it isn't worth it.
1386 
1387   return nullptr;
1388 }
1389 
1390 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1391                              const SimplifyQuery &Q) {
1392   return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
1393 }
1394 
1395 /// Given operands for an LShr, see if we can fold the result.
1396 /// If not, this returns null.
1397 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1398                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1399   if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
1400                                     MaxRecurse))
1401       return V;
1402 
1403   // (X << A) >> A -> X
1404   Value *X;
1405   if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1406     return X;
1407 
1408   // ((X << A) | Y) >> A -> X  if effective width of Y is not larger than A.
1409   // We can return X as we do in the above case since OR alters no bits in X.
1410   // SimplifyDemandedBits in InstCombine can do more general optimization for
1411   // bit manipulation. This pattern aims to provide opportunities for other
1412   // optimizers by supporting a simple but common case in InstSimplify.
1413   Value *Y;
1414   const APInt *ShRAmt, *ShLAmt;
1415   if (match(Op1, m_APInt(ShRAmt)) &&
1416       match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1417       *ShRAmt == *ShLAmt) {
1418     const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1419     const unsigned EffWidthY = YKnown.countMaxActiveBits();
1420     if (ShRAmt->uge(EffWidthY))
1421       return X;
1422   }
1423 
1424   return nullptr;
1425 }
1426 
1427 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1428                               const SimplifyQuery &Q) {
1429   return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1430 }
1431 
1432 /// Given operands for an AShr, see if we can fold the result.
1433 /// If not, this returns null.
1434 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1435                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1436   if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
1437                                     MaxRecurse))
1438     return V;
1439 
1440   // -1 >>a X --> -1
1441   // (-1 << X) a>> X --> -1
1442   // Do not return Op0 because it may contain undef elements if it's a vector.
1443   if (match(Op0, m_AllOnes()) ||
1444       match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
1445     return Constant::getAllOnesValue(Op0->getType());
1446 
1447   // (X << A) >> A -> X
1448   Value *X;
1449   if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1450     return X;
1451 
1452   // Arithmetic shifting an all-sign-bit value is a no-op.
1453   unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1454   if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1455     return Op0;
1456 
1457   return nullptr;
1458 }
1459 
1460 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1461                               const SimplifyQuery &Q) {
1462   return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1463 }
1464 
1465 /// Commuted variants are assumed to be handled by calling this function again
1466 /// with the parameters swapped.
1467 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1468                                          ICmpInst *UnsignedICmp, bool IsAnd,
1469                                          const SimplifyQuery &Q) {
1470   Value *X, *Y;
1471 
1472   ICmpInst::Predicate EqPred;
1473   if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1474       !ICmpInst::isEquality(EqPred))
1475     return nullptr;
1476 
1477   ICmpInst::Predicate UnsignedPred;
1478 
1479   Value *A, *B;
1480   // Y = (A - B);
1481   if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1482     if (match(UnsignedICmp,
1483               m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1484         ICmpInst::isUnsigned(UnsignedPred)) {
1485       // A >=/<= B || (A - B) != 0  <-->  true
1486       if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1487            UnsignedPred == ICmpInst::ICMP_ULE) &&
1488           EqPred == ICmpInst::ICMP_NE && !IsAnd)
1489         return ConstantInt::getTrue(UnsignedICmp->getType());
1490       // A </> B && (A - B) == 0  <-->  false
1491       if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1492            UnsignedPred == ICmpInst::ICMP_UGT) &&
1493           EqPred == ICmpInst::ICMP_EQ && IsAnd)
1494         return ConstantInt::getFalse(UnsignedICmp->getType());
1495 
1496       // A </> B && (A - B) != 0  <-->  A </> B
1497       // A </> B || (A - B) != 0  <-->  (A - B) != 0
1498       if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1499                                           UnsignedPred == ICmpInst::ICMP_UGT))
1500         return IsAnd ? UnsignedICmp : ZeroICmp;
1501 
1502       // A <=/>= B && (A - B) == 0  <-->  (A - B) == 0
1503       // A <=/>= B || (A - B) == 0  <-->  A <=/>= B
1504       if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1505                                           UnsignedPred == ICmpInst::ICMP_UGE))
1506         return IsAnd ? ZeroICmp : UnsignedICmp;
1507     }
1508 
1509     // Given  Y = (A - B)
1510     //   Y >= A && Y != 0  --> Y >= A  iff B != 0
1511     //   Y <  A || Y == 0  --> Y <  A  iff B != 0
1512     if (match(UnsignedICmp,
1513               m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1514       if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1515           EqPred == ICmpInst::ICMP_NE &&
1516           isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1517         return UnsignedICmp;
1518       if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1519           EqPred == ICmpInst::ICMP_EQ &&
1520           isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1521         return UnsignedICmp;
1522     }
1523   }
1524 
1525   if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1526       ICmpInst::isUnsigned(UnsignedPred))
1527     ;
1528   else if (match(UnsignedICmp,
1529                  m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1530            ICmpInst::isUnsigned(UnsignedPred))
1531     UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1532   else
1533     return nullptr;
1534 
1535   // X > Y && Y == 0  -->  Y == 0  iff X != 0
1536   // X > Y || Y == 0  -->  X > Y   iff X != 0
1537   if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1538       isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1539     return IsAnd ? ZeroICmp : UnsignedICmp;
1540 
1541   // X <= Y && Y != 0  -->  X <= Y  iff X != 0
1542   // X <= Y || Y != 0  -->  Y != 0  iff X != 0
1543   if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1544       isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1545     return IsAnd ? UnsignedICmp : ZeroICmp;
1546 
1547   // The transforms below here are expected to be handled more generally with
1548   // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1549   // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1550   // these are candidates for removal.
1551 
1552   // X < Y && Y != 0  -->  X < Y
1553   // X < Y || Y != 0  -->  Y != 0
1554   if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1555     return IsAnd ? UnsignedICmp : ZeroICmp;
1556 
1557   // X >= Y && Y == 0  -->  Y == 0
1558   // X >= Y || Y == 0  -->  X >= Y
1559   if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1560     return IsAnd ? ZeroICmp : UnsignedICmp;
1561 
1562   // X < Y && Y == 0  -->  false
1563   if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1564       IsAnd)
1565     return getFalse(UnsignedICmp->getType());
1566 
1567   // X >= Y || Y != 0  -->  true
1568   if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1569       !IsAnd)
1570     return getTrue(UnsignedICmp->getType());
1571 
1572   return nullptr;
1573 }
1574 
1575 /// Commuted variants are assumed to be handled by calling this function again
1576 /// with the parameters swapped.
1577 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1578   ICmpInst::Predicate Pred0, Pred1;
1579   Value *A ,*B;
1580   if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1581       !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1582     return nullptr;
1583 
1584   // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1585   // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1586   // can eliminate Op1 from this 'and'.
1587   if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1588     return Op0;
1589 
1590   // Check for any combination of predicates that are guaranteed to be disjoint.
1591   if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1592       (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1593       (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1594       (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1595     return getFalse(Op0->getType());
1596 
1597   return nullptr;
1598 }
1599 
1600 /// Commuted variants are assumed to be handled by calling this function again
1601 /// with the parameters swapped.
1602 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1603   ICmpInst::Predicate Pred0, Pred1;
1604   Value *A ,*B;
1605   if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1606       !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1607     return nullptr;
1608 
1609   // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
1610   // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1611   // can eliminate Op0 from this 'or'.
1612   if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1613     return Op1;
1614 
1615   // Check for any combination of predicates that cover the entire range of
1616   // possibilities.
1617   if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1618       (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
1619       (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
1620       (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
1621     return getTrue(Op0->getType());
1622 
1623   return nullptr;
1624 }
1625 
1626 /// Test if a pair of compares with a shared operand and 2 constants has an
1627 /// empty set intersection, full set union, or if one compare is a superset of
1628 /// the other.
1629 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1630                                                 bool IsAnd) {
1631   // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1632   if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1633     return nullptr;
1634 
1635   const APInt *C0, *C1;
1636   if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1637       !match(Cmp1->getOperand(1), m_APInt(C1)))
1638     return nullptr;
1639 
1640   auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1641   auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1642 
1643   // For and-of-compares, check if the intersection is empty:
1644   // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1645   if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1646     return getFalse(Cmp0->getType());
1647 
1648   // For or-of-compares, check if the union is full:
1649   // (icmp X, C0) || (icmp X, C1) --> full set --> true
1650   if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1651     return getTrue(Cmp0->getType());
1652 
1653   // Is one range a superset of the other?
1654   // If this is and-of-compares, take the smaller set:
1655   // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1656   // If this is or-of-compares, take the larger set:
1657   // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1658   if (Range0.contains(Range1))
1659     return IsAnd ? Cmp1 : Cmp0;
1660   if (Range1.contains(Range0))
1661     return IsAnd ? Cmp0 : Cmp1;
1662 
1663   return nullptr;
1664 }
1665 
1666 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
1667                                            bool IsAnd) {
1668   ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate();
1669   if (!match(Cmp0->getOperand(1), m_Zero()) ||
1670       !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1)
1671     return nullptr;
1672 
1673   if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ))
1674     return nullptr;
1675 
1676   // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)".
1677   Value *X = Cmp0->getOperand(0);
1678   Value *Y = Cmp1->getOperand(0);
1679 
1680   // If one of the compares is a masked version of a (not) null check, then
1681   // that compare implies the other, so we eliminate the other. Optionally, look
1682   // through a pointer-to-int cast to match a null check of a pointer type.
1683 
1684   // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0
1685   // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0
1686   // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0
1687   // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0
1688   if (match(Y, m_c_And(m_Specific(X), m_Value())) ||
1689       match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value())))
1690     return Cmp1;
1691 
1692   // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0
1693   // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0
1694   // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0
1695   // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0
1696   if (match(X, m_c_And(m_Specific(Y), m_Value())) ||
1697       match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value())))
1698     return Cmp0;
1699 
1700   return nullptr;
1701 }
1702 
1703 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1704                                         const InstrInfoQuery &IIQ) {
1705   // (icmp (add V, C0), C1) & (icmp V, C0)
1706   ICmpInst::Predicate Pred0, Pred1;
1707   const APInt *C0, *C1;
1708   Value *V;
1709   if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1710     return nullptr;
1711 
1712   if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1713     return nullptr;
1714 
1715   auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1716   if (AddInst->getOperand(1) != Op1->getOperand(1))
1717     return nullptr;
1718 
1719   Type *ITy = Op0->getType();
1720   bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1721   bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1722 
1723   const APInt Delta = *C1 - *C0;
1724   if (C0->isStrictlyPositive()) {
1725     if (Delta == 2) {
1726       if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1727         return getFalse(ITy);
1728       if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1729         return getFalse(ITy);
1730     }
1731     if (Delta == 1) {
1732       if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1733         return getFalse(ITy);
1734       if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1735         return getFalse(ITy);
1736     }
1737   }
1738   if (C0->getBoolValue() && isNUW) {
1739     if (Delta == 2)
1740       if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1741         return getFalse(ITy);
1742     if (Delta == 1)
1743       if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1744         return getFalse(ITy);
1745   }
1746 
1747   return nullptr;
1748 }
1749 
1750 /// Try to eliminate compares with signed or unsigned min/max constants.
1751 static Value *simplifyAndOrOfICmpsWithLimitConst(ICmpInst *Cmp0, ICmpInst *Cmp1,
1752                                                  bool IsAnd) {
1753   // Canonicalize an equality compare as Cmp0.
1754   if (Cmp1->isEquality())
1755     std::swap(Cmp0, Cmp1);
1756   if (!Cmp0->isEquality())
1757     return nullptr;
1758 
1759   // The non-equality compare must include a common operand (X). Canonicalize
1760   // the common operand as operand 0 (the predicate is swapped if the common
1761   // operand was operand 1).
1762   ICmpInst::Predicate Pred0 = Cmp0->getPredicate();
1763   Value *X = Cmp0->getOperand(0);
1764   ICmpInst::Predicate Pred1;
1765   bool HasNotOp = match(Cmp1, m_c_ICmp(Pred1, m_Not(m_Specific(X)), m_Value()));
1766   if (!HasNotOp && !match(Cmp1, m_c_ICmp(Pred1, m_Specific(X), m_Value())))
1767     return nullptr;
1768   if (ICmpInst::isEquality(Pred1))
1769     return nullptr;
1770 
1771   // The equality compare must be against a constant. Flip bits if we matched
1772   // a bitwise not. Convert a null pointer constant to an integer zero value.
1773   APInt MinMaxC;
1774   const APInt *C;
1775   if (match(Cmp0->getOperand(1), m_APInt(C)))
1776     MinMaxC = HasNotOp ? ~*C : *C;
1777   else if (isa<ConstantPointerNull>(Cmp0->getOperand(1)))
1778     MinMaxC = APInt::getZero(8);
1779   else
1780     return nullptr;
1781 
1782   // DeMorganize if this is 'or': P0 || P1 --> !P0 && !P1.
1783   if (!IsAnd) {
1784     Pred0 = ICmpInst::getInversePredicate(Pred0);
1785     Pred1 = ICmpInst::getInversePredicate(Pred1);
1786   }
1787 
1788   // Normalize to unsigned compare and unsigned min/max value.
1789   // Example for 8-bit: -128 + 128 -> 0; 127 + 128 -> 255
1790   if (ICmpInst::isSigned(Pred1)) {
1791     Pred1 = ICmpInst::getUnsignedPredicate(Pred1);
1792     MinMaxC += APInt::getSignedMinValue(MinMaxC.getBitWidth());
1793   }
1794 
1795   // (X != MAX) && (X < Y) --> X < Y
1796   // (X == MAX) || (X >= Y) --> X >= Y
1797   if (MinMaxC.isMaxValue())
1798     if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT)
1799       return Cmp1;
1800 
1801   // (X != MIN) && (X > Y) -->  X > Y
1802   // (X == MIN) || (X <= Y) --> X <= Y
1803   if (MinMaxC.isMinValue())
1804     if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_UGT)
1805       return Cmp1;
1806 
1807   return nullptr;
1808 }
1809 
1810 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1811                                  const SimplifyQuery &Q) {
1812   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1813     return X;
1814   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1815     return X;
1816 
1817   if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
1818     return X;
1819   if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0))
1820     return X;
1821 
1822   if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1823     return X;
1824 
1825   if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, true))
1826     return X;
1827 
1828   if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
1829     return X;
1830 
1831   if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1832     return X;
1833   if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1834     return X;
1835 
1836   return nullptr;
1837 }
1838 
1839 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1840                                        const InstrInfoQuery &IIQ) {
1841   // (icmp (add V, C0), C1) | (icmp V, C0)
1842   ICmpInst::Predicate Pred0, Pred1;
1843   const APInt *C0, *C1;
1844   Value *V;
1845   if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1846     return nullptr;
1847 
1848   if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1849     return nullptr;
1850 
1851   auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1852   if (AddInst->getOperand(1) != Op1->getOperand(1))
1853     return nullptr;
1854 
1855   Type *ITy = Op0->getType();
1856   bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1857   bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1858 
1859   const APInt Delta = *C1 - *C0;
1860   if (C0->isStrictlyPositive()) {
1861     if (Delta == 2) {
1862       if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1863         return getTrue(ITy);
1864       if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1865         return getTrue(ITy);
1866     }
1867     if (Delta == 1) {
1868       if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1869         return getTrue(ITy);
1870       if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1871         return getTrue(ITy);
1872     }
1873   }
1874   if (C0->getBoolValue() && isNUW) {
1875     if (Delta == 2)
1876       if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1877         return getTrue(ITy);
1878     if (Delta == 1)
1879       if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1880         return getTrue(ITy);
1881   }
1882 
1883   return nullptr;
1884 }
1885 
1886 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1887                                 const SimplifyQuery &Q) {
1888   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1889     return X;
1890   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1891     return X;
1892 
1893   if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
1894     return X;
1895   if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0))
1896     return X;
1897 
1898   if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1899     return X;
1900 
1901   if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, false))
1902     return X;
1903 
1904   if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
1905     return X;
1906 
1907   if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1908     return X;
1909   if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1910     return X;
1911 
1912   return nullptr;
1913 }
1914 
1915 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI,
1916                                    FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
1917   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1918   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1919   if (LHS0->getType() != RHS0->getType())
1920     return nullptr;
1921 
1922   FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1923   if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1924       (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1925     // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1926     // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1927     // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1928     // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1929     // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1930     // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1931     // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1932     // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
1933     if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
1934         (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1)))
1935       return RHS;
1936 
1937     // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1938     // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1939     // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1940     // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1941     // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1942     // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1943     // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1944     // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1945     if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
1946         (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1)))
1947       return LHS;
1948   }
1949 
1950   return nullptr;
1951 }
1952 
1953 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q,
1954                                   Value *Op0, Value *Op1, bool IsAnd) {
1955   // Look through casts of the 'and' operands to find compares.
1956   auto *Cast0 = dyn_cast<CastInst>(Op0);
1957   auto *Cast1 = dyn_cast<CastInst>(Op1);
1958   if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1959       Cast0->getSrcTy() == Cast1->getSrcTy()) {
1960     Op0 = Cast0->getOperand(0);
1961     Op1 = Cast1->getOperand(0);
1962   }
1963 
1964   Value *V = nullptr;
1965   auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1966   auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1967   if (ICmp0 && ICmp1)
1968     V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1969               : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1970 
1971   auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1972   auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1973   if (FCmp0 && FCmp1)
1974     V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd);
1975 
1976   if (!V)
1977     return nullptr;
1978   if (!Cast0)
1979     return V;
1980 
1981   // If we looked through casts, we can only handle a constant simplification
1982   // because we are not allowed to create a cast instruction here.
1983   if (auto *C = dyn_cast<Constant>(V))
1984     return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
1985 
1986   return nullptr;
1987 }
1988 
1989 /// Given a bitwise logic op, check if the operands are add/sub with a common
1990 /// source value and inverted constant (identity: C - X -> ~(X + ~C)).
1991 static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
1992                                     Instruction::BinaryOps Opcode) {
1993   assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
1994   assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
1995   Value *X;
1996   Constant *C1, *C2;
1997   if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
1998        match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
1999       (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
2000        match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
2001     if (ConstantExpr::getNot(C1) == C2) {
2002       // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
2003       // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
2004       // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
2005       Type *Ty = Op0->getType();
2006       return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
2007                                         : ConstantInt::getAllOnesValue(Ty);
2008     }
2009   }
2010   return nullptr;
2011 }
2012 
2013 /// Given operands for an And, see if we can fold the result.
2014 /// If not, this returns null.
2015 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2016                               unsigned MaxRecurse) {
2017   if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2018     return C;
2019 
2020   // X & poison -> poison
2021   if (isa<PoisonValue>(Op1))
2022     return Op1;
2023 
2024   // X & undef -> 0
2025   if (Q.isUndefValue(Op1))
2026     return Constant::getNullValue(Op0->getType());
2027 
2028   // X & X = X
2029   if (Op0 == Op1)
2030     return Op0;
2031 
2032   // X & 0 = 0
2033   if (match(Op1, m_Zero()))
2034     return Constant::getNullValue(Op0->getType());
2035 
2036   // X & -1 = X
2037   if (match(Op1, m_AllOnes()))
2038     return Op0;
2039 
2040   // A & ~A  =  ~A & A  =  0
2041   if (match(Op0, m_Not(m_Specific(Op1))) ||
2042       match(Op1, m_Not(m_Specific(Op0))))
2043     return Constant::getNullValue(Op0->getType());
2044 
2045   // (A | ?) & A = A
2046   if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2047     return Op1;
2048 
2049   // A & (A | ?) = A
2050   if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
2051     return Op0;
2052 
2053   // (X | Y) & (X | ~Y) --> X (commuted 8 ways)
2054   Value *X, *Y;
2055   if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2056       match(Op1, m_c_Or(m_Deferred(X), m_Deferred(Y))))
2057     return X;
2058   if (match(Op1, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2059       match(Op0, m_c_Or(m_Deferred(X), m_Deferred(Y))))
2060     return X;
2061 
2062   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2063     return V;
2064 
2065   // A mask that only clears known zeros of a shifted value is a no-op.
2066   const APInt *Mask;
2067   const APInt *ShAmt;
2068   if (match(Op1, m_APInt(Mask))) {
2069     // If all bits in the inverted and shifted mask are clear:
2070     // and (shl X, ShAmt), Mask --> shl X, ShAmt
2071     if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2072         (~(*Mask)).lshr(*ShAmt).isZero())
2073       return Op0;
2074 
2075     // If all bits in the inverted and shifted mask are clear:
2076     // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2077     if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2078         (~(*Mask)).shl(*ShAmt).isZero())
2079       return Op0;
2080   }
2081 
2082   // If we have a multiplication overflow check that is being 'and'ed with a
2083   // check that one of the multipliers is not zero, we can omit the 'and', and
2084   // only keep the overflow check.
2085   if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2086     return Op1;
2087   if (isCheckForZeroAndMulWithOverflow(Op1, Op0, true))
2088     return Op0;
2089 
2090   // A & (-A) = A if A is a power of two or zero.
2091   if (match(Op0, m_Neg(m_Specific(Op1))) ||
2092       match(Op1, m_Neg(m_Specific(Op0)))) {
2093     if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2094                                Q.DT))
2095       return Op0;
2096     if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2097                                Q.DT))
2098       return Op1;
2099   }
2100 
2101   // This is a similar pattern used for checking if a value is a power-of-2:
2102   // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2103   // A & (A - 1) --> 0 (if A is a power-of-2 or 0)
2104   if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2105       isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2106     return Constant::getNullValue(Op1->getType());
2107   if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) &&
2108       isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2109     return Constant::getNullValue(Op0->getType());
2110 
2111   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2112     return V;
2113 
2114   // Try some generic simplifications for associative operations.
2115   if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
2116                                           MaxRecurse))
2117     return V;
2118 
2119   // And distributes over Or.  Try some generic simplifications based on this.
2120   if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2121                                         Instruction::Or, Q, MaxRecurse))
2122     return V;
2123 
2124   // And distributes over Xor.  Try some generic simplifications based on this.
2125   if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2126                                         Instruction::Xor, Q, MaxRecurse))
2127     return V;
2128 
2129   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2130     if (Op0->getType()->isIntOrIntVectorTy(1)) {
2131       // A & (A && B) -> A && B
2132       if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2133         return Op1;
2134       else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2135         return Op0;
2136     }
2137     // If the operation is with the result of a select instruction, check
2138     // whether operating on either branch of the select always yields the same
2139     // value.
2140     if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
2141                                          MaxRecurse))
2142       return V;
2143   }
2144 
2145   // If the operation is with the result of a phi instruction, check whether
2146   // operating on all incoming values of the phi always yields the same value.
2147   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2148     if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
2149                                       MaxRecurse))
2150       return V;
2151 
2152   // Assuming the effective width of Y is not larger than A, i.e. all bits
2153   // from X and Y are disjoint in (X << A) | Y,
2154   // if the mask of this AND op covers all bits of X or Y, while it covers
2155   // no bits from the other, we can bypass this AND op. E.g.,
2156   // ((X << A) | Y) & Mask -> Y,
2157   //     if Mask = ((1 << effective_width_of(Y)) - 1)
2158   // ((X << A) | Y) & Mask -> X << A,
2159   //     if Mask = ((1 << effective_width_of(X)) - 1) << A
2160   // SimplifyDemandedBits in InstCombine can optimize the general case.
2161   // This pattern aims to help other passes for a common case.
2162   Value *XShifted;
2163   if (match(Op1, m_APInt(Mask)) &&
2164       match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2165                                      m_Value(XShifted)),
2166                         m_Value(Y)))) {
2167     const unsigned Width = Op0->getType()->getScalarSizeInBits();
2168     const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2169     const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2170     const unsigned EffWidthY = YKnown.countMaxActiveBits();
2171     if (EffWidthY <= ShftCnt) {
2172       const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
2173                                                 Q.DT);
2174       const unsigned EffWidthX = XKnown.countMaxActiveBits();
2175       const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2176       const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2177       // If the mask is extracting all bits from X or Y as is, we can skip
2178       // this AND op.
2179       if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2180         return Y;
2181       if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2182         return XShifted;
2183     }
2184   }
2185 
2186   // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2187   // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
2188   BinaryOperator *Or;
2189   if (match(Op0, m_c_Xor(m_Value(X),
2190                          m_CombineAnd(m_BinOp(Or),
2191                                       m_c_Or(m_Deferred(X), m_Value(Y))))) &&
2192       match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
2193     return Constant::getNullValue(Op0->getType());
2194 
2195   return nullptr;
2196 }
2197 
2198 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2199   return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
2200 }
2201 
2202 static Value *simplifyOrLogic(Value *X, Value *Y) {
2203   assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2204   Type *Ty = X->getType();
2205 
2206   // X | ~X --> -1
2207   if (match(Y, m_Not(m_Specific(X))))
2208     return ConstantInt::getAllOnesValue(Ty);
2209 
2210   // X | ~(X & ?) = -1
2211   if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
2212     return ConstantInt::getAllOnesValue(Ty);
2213 
2214   // X | (X & ?) --> X
2215   if (match(Y, m_c_And(m_Specific(X), m_Value())))
2216     return X;
2217 
2218   Value *A, *B;
2219 
2220   // (A ^ B) | (A | B) --> A | B
2221   // (A ^ B) | (B | A) --> B | A
2222   if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
2223       match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2224     return Y;
2225 
2226   // ~(A ^ B) | (A | B) --> -1
2227   // ~(A ^ B) | (B | A) --> -1
2228   if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
2229       match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2230     return ConstantInt::getAllOnesValue(Ty);
2231 
2232   // (A & ~B) | (A ^ B) --> A ^ B
2233   // (~B & A) | (A ^ B) --> A ^ B
2234   // (A & ~B) | (B ^ A) --> B ^ A
2235   // (~B & A) | (B ^ A) --> B ^ A
2236   if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2237       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2238     return Y;
2239 
2240   // (~A ^ B) | (A & B) --> ~A ^ B
2241   // (B ^ ~A) | (A & B) --> B ^ ~A
2242   // (~A ^ B) | (B & A) --> ~A ^ B
2243   // (B ^ ~A) | (B & A) --> B ^ ~A
2244   if (match(X, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
2245       match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2246     return X;
2247 
2248   // (~A | B) | (A ^ B) --> -1
2249   // (~A | B) | (B ^ A) --> -1
2250   // (B | ~A) | (A ^ B) --> -1
2251   // (B | ~A) | (B ^ A) --> -1
2252   if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2253       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2254     return ConstantInt::getAllOnesValue(Ty);
2255 
2256   // (~A & B) | ~(A | B) --> ~A
2257   // (~A & B) | ~(B | A) --> ~A
2258   // (B & ~A) | ~(A | B) --> ~A
2259   // (B & ~A) | ~(B | A) --> ~A
2260   Value *NotA;
2261   if (match(X,
2262             m_c_And(m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
2263                     m_Value(B))) &&
2264       match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2265     return NotA;
2266 
2267   // ~(A ^ B) | (A & B) --> ~(A ^ B)
2268   // ~(A ^ B) | (B & A) --> ~(A ^ B)
2269   Value *NotAB;
2270   if (match(X, m_CombineAnd(m_NotForbidUndef(m_Xor(m_Value(A), m_Value(B))),
2271                             m_Value(NotAB))) &&
2272       match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2273     return NotAB;
2274 
2275   // ~(A & B) | (A ^ B) --> ~(A & B)
2276   // ~(A & B) | (B ^ A) --> ~(A & B)
2277   if (match(X, m_CombineAnd(m_NotForbidUndef(m_And(m_Value(A), m_Value(B))),
2278                             m_Value(NotAB))) &&
2279       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2280     return NotAB;
2281 
2282   return nullptr;
2283 }
2284 
2285 /// Given operands for an Or, see if we can fold the result.
2286 /// If not, this returns null.
2287 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2288                              unsigned MaxRecurse) {
2289   if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2290     return C;
2291 
2292   // X | poison -> poison
2293   if (isa<PoisonValue>(Op1))
2294     return Op1;
2295 
2296   // X | undef -> -1
2297   // X | -1 = -1
2298   // Do not return Op1 because it may contain undef elements if it's a vector.
2299   if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2300     return Constant::getAllOnesValue(Op0->getType());
2301 
2302   // X | X = X
2303   // X | 0 = X
2304   if (Op0 == Op1 || match(Op1, m_Zero()))
2305     return Op0;
2306 
2307   if (Value *R = simplifyOrLogic(Op0, Op1))
2308     return R;
2309   if (Value *R = simplifyOrLogic(Op1, Op0))
2310     return R;
2311 
2312   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2313     return V;
2314 
2315   // Rotated -1 is still -1:
2316   // (-1 << X) | (-1 >> (C - X)) --> -1
2317   // (-1 >> X) | (-1 << (C - X)) --> -1
2318   // ...with C <= bitwidth (and commuted variants).
2319   Value *X, *Y;
2320   if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
2321        match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
2322       (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
2323        match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
2324     const APInt *C;
2325     if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
2326          match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
2327         C->ule(X->getType()->getScalarSizeInBits())) {
2328       return ConstantInt::getAllOnesValue(X->getType());
2329     }
2330   }
2331 
2332   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2333     return V;
2334 
2335   // If we have a multiplication overflow check that is being 'and'ed with a
2336   // check that one of the multipliers is not zero, we can omit the 'and', and
2337   // only keep the overflow check.
2338   if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2339     return Op1;
2340   if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2341     return Op0;
2342 
2343   // Try some generic simplifications for associative operations.
2344   if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
2345                                           MaxRecurse))
2346     return V;
2347 
2348   // Or distributes over And.  Try some generic simplifications based on this.
2349   if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2350                                         Instruction::And, Q, MaxRecurse))
2351     return V;
2352 
2353   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2354     if (Op0->getType()->isIntOrIntVectorTy(1)) {
2355       // A | (A || B) -> A || B
2356       if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2357         return Op1;
2358       else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2359         return Op0;
2360     }
2361     // If the operation is with the result of a select instruction, check
2362     // whether operating on either branch of the select always yields the same
2363     // value.
2364     if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
2365                                          MaxRecurse))
2366       return V;
2367   }
2368 
2369   // (A & C1)|(B & C2)
2370   Value *A, *B;
2371   const APInt *C1, *C2;
2372   if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2373       match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2374     if (*C1 == ~*C2) {
2375       // (A & C1)|(B & C2)
2376       // If we have: ((V + N) & C1) | (V & C2)
2377       // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2378       // replace with V+N.
2379       Value *N;
2380       if (C2->isMask() && // C2 == 0+1+
2381           match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2382         // Add commutes, try both ways.
2383         if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2384           return A;
2385       }
2386       // Or commutes, try both ways.
2387       if (C1->isMask() &&
2388           match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2389         // Add commutes, try both ways.
2390         if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2391           return B;
2392       }
2393     }
2394   }
2395 
2396   // If the operation is with the result of a phi instruction, check whether
2397   // operating on all incoming values of the phi always yields the same value.
2398   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2399     if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2400       return V;
2401 
2402   return nullptr;
2403 }
2404 
2405 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2406   return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit);
2407 }
2408 
2409 /// Given operands for a Xor, see if we can fold the result.
2410 /// If not, this returns null.
2411 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2412                               unsigned MaxRecurse) {
2413   if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2414     return C;
2415 
2416   // X ^ poison -> poison
2417   if (isa<PoisonValue>(Op1))
2418     return Op1;
2419 
2420   // A ^ undef -> undef
2421   if (Q.isUndefValue(Op1))
2422     return Op1;
2423 
2424   // A ^ 0 = A
2425   if (match(Op1, m_Zero()))
2426     return Op0;
2427 
2428   // A ^ A = 0
2429   if (Op0 == Op1)
2430     return Constant::getNullValue(Op0->getType());
2431 
2432   // A ^ ~A  =  ~A ^ A  =  -1
2433   if (match(Op0, m_Not(m_Specific(Op1))) ||
2434       match(Op1, m_Not(m_Specific(Op0))))
2435     return Constant::getAllOnesValue(Op0->getType());
2436 
2437   auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
2438     Value *A, *B;
2439     // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
2440     if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
2441         match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2442       return A;
2443 
2444     // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2445     // The 'not' op must contain a complete -1 operand (no undef elements for
2446     // vector) for the transform to be safe.
2447     Value *NotA;
2448     if (match(X,
2449               m_c_Or(m_CombineAnd(m_NotForbidUndef(m_Value(A)), m_Value(NotA)),
2450                      m_Value(B))) &&
2451         match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2452       return NotA;
2453 
2454     return nullptr;
2455   };
2456   if (Value *R = foldAndOrNot(Op0, Op1))
2457     return R;
2458   if (Value *R = foldAndOrNot(Op1, Op0))
2459     return R;
2460 
2461   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2462     return V;
2463 
2464   // Try some generic simplifications for associative operations.
2465   if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
2466                                           MaxRecurse))
2467     return V;
2468 
2469   // Threading Xor over selects and phi nodes is pointless, so don't bother.
2470   // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2471   // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2472   // only if B and C are equal.  If B and C are equal then (since we assume
2473   // that operands have already been simplified) "select(cond, B, C)" should
2474   // have been simplified to the common value of B and C already.  Analysing
2475   // "A^B" and "A^C" thus gains nothing, but costs compile time.  Similarly
2476   // for threading over phi nodes.
2477 
2478   return nullptr;
2479 }
2480 
2481 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2482   return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
2483 }
2484 
2485 
2486 static Type *GetCompareTy(Value *Op) {
2487   return CmpInst::makeCmpResultType(Op->getType());
2488 }
2489 
2490 /// Rummage around inside V looking for something equivalent to the comparison
2491 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2492 /// Helper function for analyzing max/min idioms.
2493 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2494                                          Value *LHS, Value *RHS) {
2495   SelectInst *SI = dyn_cast<SelectInst>(V);
2496   if (!SI)
2497     return nullptr;
2498   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2499   if (!Cmp)
2500     return nullptr;
2501   Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2502   if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2503     return Cmp;
2504   if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2505       LHS == CmpRHS && RHS == CmpLHS)
2506     return Cmp;
2507   return nullptr;
2508 }
2509 
2510 // A significant optimization not implemented here is assuming that alloca
2511 // addresses are not equal to incoming argument values. They don't *alias*,
2512 // as we say, but that doesn't mean they aren't equal, so we take a
2513 // conservative approach.
2514 //
2515 // This is inspired in part by C++11 5.10p1:
2516 //   "Two pointers of the same type compare equal if and only if they are both
2517 //    null, both point to the same function, or both represent the same
2518 //    address."
2519 //
2520 // This is pretty permissive.
2521 //
2522 // It's also partly due to C11 6.5.9p6:
2523 //   "Two pointers compare equal if and only if both are null pointers, both are
2524 //    pointers to the same object (including a pointer to an object and a
2525 //    subobject at its beginning) or function, both are pointers to one past the
2526 //    last element of the same array object, or one is a pointer to one past the
2527 //    end of one array object and the other is a pointer to the start of a
2528 //    different array object that happens to immediately follow the first array
2529 //    object in the address space.)
2530 //
2531 // C11's version is more restrictive, however there's no reason why an argument
2532 // couldn't be a one-past-the-end value for a stack object in the caller and be
2533 // equal to the beginning of a stack object in the callee.
2534 //
2535 // If the C and C++ standards are ever made sufficiently restrictive in this
2536 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2537 // this optimization.
2538 static Constant *
2539 computePointerICmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2540                    const SimplifyQuery &Q) {
2541   const DataLayout &DL = Q.DL;
2542   const TargetLibraryInfo *TLI = Q.TLI;
2543   const DominatorTree *DT = Q.DT;
2544   const Instruction *CxtI = Q.CxtI;
2545   const InstrInfoQuery &IIQ = Q.IIQ;
2546 
2547   // First, skip past any trivial no-ops.
2548   LHS = LHS->stripPointerCasts();
2549   RHS = RHS->stripPointerCasts();
2550 
2551   // A non-null pointer is not equal to a null pointer.
2552   if (isa<ConstantPointerNull>(RHS) && ICmpInst::isEquality(Pred) &&
2553       llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
2554                            IIQ.UseInstrInfo))
2555     return ConstantInt::get(GetCompareTy(LHS),
2556                             !CmpInst::isTrueWhenEqual(Pred));
2557 
2558   // We can only fold certain predicates on pointer comparisons.
2559   switch (Pred) {
2560   default:
2561     return nullptr;
2562 
2563     // Equality comaprisons are easy to fold.
2564   case CmpInst::ICMP_EQ:
2565   case CmpInst::ICMP_NE:
2566     break;
2567 
2568     // We can only handle unsigned relational comparisons because 'inbounds' on
2569     // a GEP only protects against unsigned wrapping.
2570   case CmpInst::ICMP_UGT:
2571   case CmpInst::ICMP_UGE:
2572   case CmpInst::ICMP_ULT:
2573   case CmpInst::ICMP_ULE:
2574     // However, we have to switch them to their signed variants to handle
2575     // negative indices from the base pointer.
2576     Pred = ICmpInst::getSignedPredicate(Pred);
2577     break;
2578   }
2579 
2580   // Strip off any constant offsets so that we can reason about them.
2581   // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2582   // here and compare base addresses like AliasAnalysis does, however there are
2583   // numerous hazards. AliasAnalysis and its utilities rely on special rules
2584   // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2585   // doesn't need to guarantee pointer inequality when it says NoAlias.
2586 
2587   // Even if an non-inbounds GEP occurs along the path we can still optimize
2588   // equality comparisons concerning the result.
2589   bool AllowNonInbounds = ICmpInst::isEquality(Pred);
2590   APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS, AllowNonInbounds);
2591   APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS, AllowNonInbounds);
2592 
2593   // If LHS and RHS are related via constant offsets to the same base
2594   // value, we can replace it with an icmp which just compares the offsets.
2595   if (LHS == RHS)
2596     return ConstantInt::get(
2597         GetCompareTy(LHS), ICmpInst::compare(LHSOffset, RHSOffset, Pred));
2598 
2599   // Various optimizations for (in)equality comparisons.
2600   if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2601     // Different non-empty allocations that exist at the same time have
2602     // different addresses (if the program can tell). Global variables always
2603     // exist, so they always exist during the lifetime of each other and all
2604     // allocas. Two different allocas usually have different addresses...
2605     //
2606     // However, if there's an @llvm.stackrestore dynamically in between two
2607     // allocas, they may have the same address. It's tempting to reduce the
2608     // scope of the problem by only looking at *static* allocas here. That would
2609     // cover the majority of allocas while significantly reducing the likelihood
2610     // of having an @llvm.stackrestore pop up in the middle. However, it's not
2611     // actually impossible for an @llvm.stackrestore to pop up in the middle of
2612     // an entry block. Also, if we have a block that's not attached to a
2613     // function, we can't tell if it's "static" under the current definition.
2614     // Theoretically, this problem could be fixed by creating a new kind of
2615     // instruction kind specifically for static allocas. Such a new instruction
2616     // could be required to be at the top of the entry block, thus preventing it
2617     // from being subject to a @llvm.stackrestore. Instcombine could even
2618     // convert regular allocas into these special allocas. It'd be nifty.
2619     // However, until then, this problem remains open.
2620     //
2621     // So, we'll assume that two non-empty allocas have different addresses
2622     // for now.
2623     //
2624     // With all that, if the offsets are within the bounds of their allocations
2625     // (and not one-past-the-end! so we can't use inbounds!), and their
2626     // allocations aren't the same, the pointers are not equal.
2627     //
2628     // Note that it's not necessary to check for LHS being a global variable
2629     // address, due to canonicalization and constant folding.
2630     if (isa<AllocaInst>(LHS) &&
2631         (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2632       uint64_t LHSSize, RHSSize;
2633       ObjectSizeOpts Opts;
2634       Opts.NullIsUnknownSize =
2635           NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
2636       if (getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2637           getObjectSize(RHS, RHSSize, DL, TLI, Opts) &&
2638           !LHSOffset.isNegative() && !RHSOffset.isNegative() &&
2639           LHSOffset.ult(LHSSize) && RHSOffset.ult(RHSSize)) {
2640         return ConstantInt::get(GetCompareTy(LHS),
2641                                 !CmpInst::isTrueWhenEqual(Pred));
2642       }
2643 
2644       // Repeat the above check but this time without depending on DataLayout
2645       // or being able to compute a precise size.
2646       if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2647           !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2648           LHSOffset.isNullValue() && RHSOffset.isNullValue())
2649         return ConstantInt::get(GetCompareTy(LHS),
2650                                 !CmpInst::isTrueWhenEqual(Pred));
2651     }
2652 
2653     // If one side of the equality comparison must come from a noalias call
2654     // (meaning a system memory allocation function), and the other side must
2655     // come from a pointer that cannot overlap with dynamically-allocated
2656     // memory within the lifetime of the current function (allocas, byval
2657     // arguments, globals), then determine the comparison result here.
2658     SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2659     getUnderlyingObjects(LHS, LHSUObjs);
2660     getUnderlyingObjects(RHS, RHSUObjs);
2661 
2662     // Is the set of underlying objects all noalias calls?
2663     auto IsNAC = [](ArrayRef<const Value *> Objects) {
2664       return all_of(Objects, isNoAliasCall);
2665     };
2666 
2667     // Is the set of underlying objects all things which must be disjoint from
2668     // noalias calls. For allocas, we consider only static ones (dynamic
2669     // allocas might be transformed into calls to malloc not simultaneously
2670     // live with the compared-to allocation). For globals, we exclude symbols
2671     // that might be resolve lazily to symbols in another dynamically-loaded
2672     // library (and, thus, could be malloc'ed by the implementation).
2673     auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2674       return all_of(Objects, [](const Value *V) {
2675         if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2676           return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2677         if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2678           return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2679                   GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2680                  !GV->isThreadLocal();
2681         if (const Argument *A = dyn_cast<Argument>(V))
2682           return A->hasByValAttr();
2683         return false;
2684       });
2685     };
2686 
2687     if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2688         (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2689         return ConstantInt::get(GetCompareTy(LHS),
2690                                 !CmpInst::isTrueWhenEqual(Pred));
2691 
2692     // Fold comparisons for non-escaping pointer even if the allocation call
2693     // cannot be elided. We cannot fold malloc comparison to null. Also, the
2694     // dynamic allocation call could be either of the operands.  Note that
2695     // the other operand can not be based on the alloc - if it were, then
2696     // the cmp itself would be a capture.
2697     Value *MI = nullptr;
2698     if (isAllocLikeFn(LHS, TLI) &&
2699         llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2700       MI = LHS;
2701     else if (isAllocLikeFn(RHS, TLI) &&
2702              llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2703       MI = RHS;
2704     // FIXME: We should also fold the compare when the pointer escapes, but the
2705     // compare dominates the pointer escape
2706     if (MI && !PointerMayBeCaptured(MI, true, true))
2707       return ConstantInt::get(GetCompareTy(LHS),
2708                               CmpInst::isFalseWhenEqual(Pred));
2709   }
2710 
2711   // Otherwise, fail.
2712   return nullptr;
2713 }
2714 
2715 /// Fold an icmp when its operands have i1 scalar type.
2716 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2717                                   Value *RHS, const SimplifyQuery &Q) {
2718   Type *ITy = GetCompareTy(LHS); // The return type.
2719   Type *OpTy = LHS->getType();   // The operand type.
2720   if (!OpTy->isIntOrIntVectorTy(1))
2721     return nullptr;
2722 
2723   // A boolean compared to true/false can be reduced in 14 out of the 20
2724   // (10 predicates * 2 constants) possible combinations. The other
2725   // 6 cases require a 'not' of the LHS.
2726 
2727   auto ExtractNotLHS = [](Value *V) -> Value * {
2728     Value *X;
2729     if (match(V, m_Not(m_Value(X))))
2730       return X;
2731     return nullptr;
2732   };
2733 
2734   if (match(RHS, m_Zero())) {
2735     switch (Pred) {
2736     case CmpInst::ICMP_NE:  // X !=  0 -> X
2737     case CmpInst::ICMP_UGT: // X >u  0 -> X
2738     case CmpInst::ICMP_SLT: // X <s  0 -> X
2739       return LHS;
2740 
2741     case CmpInst::ICMP_EQ:  // not(X) ==  0 -> X != 0 -> X
2742     case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2743     case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2744       if (Value *X = ExtractNotLHS(LHS))
2745         return X;
2746       break;
2747 
2748     case CmpInst::ICMP_ULT: // X <u  0 -> false
2749     case CmpInst::ICMP_SGT: // X >s  0 -> false
2750       return getFalse(ITy);
2751 
2752     case CmpInst::ICMP_UGE: // X >=u 0 -> true
2753     case CmpInst::ICMP_SLE: // X <=s 0 -> true
2754       return getTrue(ITy);
2755 
2756     default: break;
2757     }
2758   } else if (match(RHS, m_One())) {
2759     switch (Pred) {
2760     case CmpInst::ICMP_EQ:  // X ==   1 -> X
2761     case CmpInst::ICMP_UGE: // X >=u  1 -> X
2762     case CmpInst::ICMP_SLE: // X <=s -1 -> X
2763       return LHS;
2764 
2765     case CmpInst::ICMP_NE:  // not(X) !=  1 -> X ==   1 -> X
2766     case CmpInst::ICMP_ULT: // not(X) <=u 1 -> X >=u  1 -> X
2767     case CmpInst::ICMP_SGT: // not(X) >s  1 -> X <=s -1 -> X
2768       if (Value *X = ExtractNotLHS(LHS))
2769         return X;
2770       break;
2771 
2772     case CmpInst::ICMP_UGT: // X >u   1 -> false
2773     case CmpInst::ICMP_SLT: // X <s  -1 -> false
2774       return getFalse(ITy);
2775 
2776     case CmpInst::ICMP_ULE: // X <=u  1 -> true
2777     case CmpInst::ICMP_SGE: // X >=s -1 -> true
2778       return getTrue(ITy);
2779 
2780     default: break;
2781     }
2782   }
2783 
2784   switch (Pred) {
2785   default:
2786     break;
2787   case ICmpInst::ICMP_UGE:
2788     if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2789       return getTrue(ITy);
2790     break;
2791   case ICmpInst::ICMP_SGE:
2792     /// For signed comparison, the values for an i1 are 0 and -1
2793     /// respectively. This maps into a truth table of:
2794     /// LHS | RHS | LHS >=s RHS   | LHS implies RHS
2795     ///  0  |  0  |  1 (0 >= 0)   |  1
2796     ///  0  |  1  |  1 (0 >= -1)  |  1
2797     ///  1  |  0  |  0 (-1 >= 0)  |  0
2798     ///  1  |  1  |  1 (-1 >= -1) |  1
2799     if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2800       return getTrue(ITy);
2801     break;
2802   case ICmpInst::ICMP_ULE:
2803     if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2804       return getTrue(ITy);
2805     break;
2806   }
2807 
2808   return nullptr;
2809 }
2810 
2811 /// Try hard to fold icmp with zero RHS because this is a common case.
2812 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2813                                    Value *RHS, const SimplifyQuery &Q) {
2814   if (!match(RHS, m_Zero()))
2815     return nullptr;
2816 
2817   Type *ITy = GetCompareTy(LHS); // The return type.
2818   switch (Pred) {
2819   default:
2820     llvm_unreachable("Unknown ICmp predicate!");
2821   case ICmpInst::ICMP_ULT:
2822     return getFalse(ITy);
2823   case ICmpInst::ICMP_UGE:
2824     return getTrue(ITy);
2825   case ICmpInst::ICMP_EQ:
2826   case ICmpInst::ICMP_ULE:
2827     if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2828       return getFalse(ITy);
2829     break;
2830   case ICmpInst::ICMP_NE:
2831   case ICmpInst::ICMP_UGT:
2832     if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2833       return getTrue(ITy);
2834     break;
2835   case ICmpInst::ICMP_SLT: {
2836     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2837     if (LHSKnown.isNegative())
2838       return getTrue(ITy);
2839     if (LHSKnown.isNonNegative())
2840       return getFalse(ITy);
2841     break;
2842   }
2843   case ICmpInst::ICMP_SLE: {
2844     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2845     if (LHSKnown.isNegative())
2846       return getTrue(ITy);
2847     if (LHSKnown.isNonNegative() &&
2848         isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2849       return getFalse(ITy);
2850     break;
2851   }
2852   case ICmpInst::ICMP_SGE: {
2853     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2854     if (LHSKnown.isNegative())
2855       return getFalse(ITy);
2856     if (LHSKnown.isNonNegative())
2857       return getTrue(ITy);
2858     break;
2859   }
2860   case ICmpInst::ICMP_SGT: {
2861     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2862     if (LHSKnown.isNegative())
2863       return getFalse(ITy);
2864     if (LHSKnown.isNonNegative() &&
2865         isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2866       return getTrue(ITy);
2867     break;
2868   }
2869   }
2870 
2871   return nullptr;
2872 }
2873 
2874 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2875                                        Value *RHS, const InstrInfoQuery &IIQ) {
2876   Type *ITy = GetCompareTy(RHS); // The return type.
2877 
2878   Value *X;
2879   // Sign-bit checks can be optimized to true/false after unsigned
2880   // floating-point casts:
2881   // icmp slt (bitcast (uitofp X)),  0 --> false
2882   // icmp sgt (bitcast (uitofp X)), -1 --> true
2883   if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
2884     if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
2885       return ConstantInt::getFalse(ITy);
2886     if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
2887       return ConstantInt::getTrue(ITy);
2888   }
2889 
2890   const APInt *C;
2891   if (!match(RHS, m_APIntAllowUndef(C)))
2892     return nullptr;
2893 
2894   // Rule out tautological comparisons (eg., ult 0 or uge 0).
2895   ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2896   if (RHS_CR.isEmptySet())
2897     return ConstantInt::getFalse(ITy);
2898   if (RHS_CR.isFullSet())
2899     return ConstantInt::getTrue(ITy);
2900 
2901   ConstantRange LHS_CR =
2902       computeConstantRange(LHS, CmpInst::isSigned(Pred), IIQ.UseInstrInfo);
2903   if (!LHS_CR.isFullSet()) {
2904     if (RHS_CR.contains(LHS_CR))
2905       return ConstantInt::getTrue(ITy);
2906     if (RHS_CR.inverse().contains(LHS_CR))
2907       return ConstantInt::getFalse(ITy);
2908   }
2909 
2910   // (mul nuw/nsw X, MulC) != C --> true  (if C is not a multiple of MulC)
2911   // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
2912   const APInt *MulC;
2913   if (ICmpInst::isEquality(Pred) &&
2914       ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
2915         *MulC != 0 && C->urem(*MulC) != 0) ||
2916        (match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
2917         *MulC != 0 && C->srem(*MulC) != 0)))
2918     return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
2919 
2920   return nullptr;
2921 }
2922 
2923 static Value *simplifyICmpWithBinOpOnLHS(
2924     CmpInst::Predicate Pred, BinaryOperator *LBO, Value *RHS,
2925     const SimplifyQuery &Q, unsigned MaxRecurse) {
2926   Type *ITy = GetCompareTy(RHS); // The return type.
2927 
2928   Value *Y = nullptr;
2929   // icmp pred (or X, Y), X
2930   if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2931     if (Pred == ICmpInst::ICMP_ULT)
2932       return getFalse(ITy);
2933     if (Pred == ICmpInst::ICMP_UGE)
2934       return getTrue(ITy);
2935 
2936     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2937       KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2938       KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2939       if (RHSKnown.isNonNegative() && YKnown.isNegative())
2940         return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2941       if (RHSKnown.isNegative() || YKnown.isNonNegative())
2942         return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2943     }
2944   }
2945 
2946   // icmp pred (and X, Y), X
2947   if (match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
2948     if (Pred == ICmpInst::ICMP_UGT)
2949       return getFalse(ITy);
2950     if (Pred == ICmpInst::ICMP_ULE)
2951       return getTrue(ITy);
2952   }
2953 
2954   // icmp pred (urem X, Y), Y
2955   if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2956     switch (Pred) {
2957     default:
2958       break;
2959     case ICmpInst::ICMP_SGT:
2960     case ICmpInst::ICMP_SGE: {
2961       KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2962       if (!Known.isNonNegative())
2963         break;
2964       LLVM_FALLTHROUGH;
2965     }
2966     case ICmpInst::ICMP_EQ:
2967     case ICmpInst::ICMP_UGT:
2968     case ICmpInst::ICMP_UGE:
2969       return getFalse(ITy);
2970     case ICmpInst::ICMP_SLT:
2971     case ICmpInst::ICMP_SLE: {
2972       KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2973       if (!Known.isNonNegative())
2974         break;
2975       LLVM_FALLTHROUGH;
2976     }
2977     case ICmpInst::ICMP_NE:
2978     case ICmpInst::ICMP_ULT:
2979     case ICmpInst::ICMP_ULE:
2980       return getTrue(ITy);
2981     }
2982   }
2983 
2984   // icmp pred (urem X, Y), X
2985   if (match(LBO, m_URem(m_Specific(RHS), m_Value()))) {
2986     if (Pred == ICmpInst::ICMP_ULE)
2987       return getTrue(ITy);
2988     if (Pred == ICmpInst::ICMP_UGT)
2989       return getFalse(ITy);
2990   }
2991 
2992   // x >>u y <=u x --> true.
2993   // x >>u y >u  x --> false.
2994   // x udiv y <=u x --> true.
2995   // x udiv y >u  x --> false.
2996   if (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2997       match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
2998     // icmp pred (X op Y), X
2999     if (Pred == ICmpInst::ICMP_UGT)
3000       return getFalse(ITy);
3001     if (Pred == ICmpInst::ICMP_ULE)
3002       return getTrue(ITy);
3003   }
3004 
3005   // If x is nonzero:
3006   // x >>u C <u  x --> true  for C != 0.
3007   // x >>u C !=  x --> true  for C != 0.
3008   // x >>u C >=u x --> false for C != 0.
3009   // x >>u C ==  x --> false for C != 0.
3010   // x udiv C <u  x --> true  for C != 1.
3011   // x udiv C !=  x --> true  for C != 1.
3012   // x udiv C >=u x --> false for C != 1.
3013   // x udiv C ==  x --> false for C != 1.
3014   // TODO: allow non-constant shift amount/divisor
3015   const APInt *C;
3016   if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
3017       (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
3018     if (isKnownNonZero(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) {
3019       switch (Pred) {
3020       default:
3021         break;
3022       case ICmpInst::ICMP_EQ:
3023       case ICmpInst::ICMP_UGE:
3024         return getFalse(ITy);
3025       case ICmpInst::ICMP_NE:
3026       case ICmpInst::ICMP_ULT:
3027         return getTrue(ITy);
3028       case ICmpInst::ICMP_UGT:
3029       case ICmpInst::ICMP_ULE:
3030         // UGT/ULE are handled by the more general case just above
3031         llvm_unreachable("Unexpected UGT/ULE, should have been handled");
3032       }
3033     }
3034   }
3035 
3036   // (x*C1)/C2 <= x for C1 <= C2.
3037   // This holds even if the multiplication overflows: Assume that x != 0 and
3038   // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3039   // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
3040   //
3041   // Additionally, either the multiplication and division might be represented
3042   // as shifts:
3043   // (x*C1)>>C2 <= x for C1 < 2**C2.
3044   // (x<<C1)/C2 <= x for 2**C1 < C2.
3045   const APInt *C1, *C2;
3046   if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3047        C1->ule(*C2)) ||
3048       (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3049        C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3050       (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3051        (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3052     if (Pred == ICmpInst::ICMP_UGT)
3053       return getFalse(ITy);
3054     if (Pred == ICmpInst::ICMP_ULE)
3055       return getTrue(ITy);
3056   }
3057 
3058   return nullptr;
3059 }
3060 
3061 
3062 // If only one of the icmp's operands has NSW flags, try to prove that:
3063 //
3064 //   icmp slt (x + C1), (x +nsw C2)
3065 //
3066 // is equivalent to:
3067 //
3068 //   icmp slt C1, C2
3069 //
3070 // which is true if x + C2 has the NSW flags set and:
3071 // *) C1 < C2 && C1 >= 0, or
3072 // *) C2 < C1 && C1 <= 0.
3073 //
3074 static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
3075                                     Value *RHS) {
3076   // TODO: only support icmp slt for now.
3077   if (Pred != CmpInst::ICMP_SLT)
3078     return false;
3079 
3080   // Canonicalize nsw add as RHS.
3081   if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3082     std::swap(LHS, RHS);
3083   if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3084     return false;
3085 
3086   Value *X;
3087   const APInt *C1, *C2;
3088   if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
3089       !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
3090     return false;
3091 
3092   return (C1->slt(*C2) && C1->isNonNegative()) ||
3093          (C2->slt(*C1) && C1->isNonPositive());
3094 }
3095 
3096 
3097 /// TODO: A large part of this logic is duplicated in InstCombine's
3098 /// foldICmpBinOp(). We should be able to share that and avoid the code
3099 /// duplication.
3100 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
3101                                     Value *RHS, const SimplifyQuery &Q,
3102                                     unsigned MaxRecurse) {
3103   BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
3104   BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
3105   if (MaxRecurse && (LBO || RBO)) {
3106     // Analyze the case when either LHS or RHS is an add instruction.
3107     Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3108     // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3109     bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3110     if (LBO && LBO->getOpcode() == Instruction::Add) {
3111       A = LBO->getOperand(0);
3112       B = LBO->getOperand(1);
3113       NoLHSWrapProblem =
3114           ICmpInst::isEquality(Pred) ||
3115           (CmpInst::isUnsigned(Pred) &&
3116            Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
3117           (CmpInst::isSigned(Pred) &&
3118            Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
3119     }
3120     if (RBO && RBO->getOpcode() == Instruction::Add) {
3121       C = RBO->getOperand(0);
3122       D = RBO->getOperand(1);
3123       NoRHSWrapProblem =
3124           ICmpInst::isEquality(Pred) ||
3125           (CmpInst::isUnsigned(Pred) &&
3126            Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3127           (CmpInst::isSigned(Pred) &&
3128            Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3129     }
3130 
3131     // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3132     if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3133       if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
3134                                       Constant::getNullValue(RHS->getType()), Q,
3135                                       MaxRecurse - 1))
3136         return V;
3137 
3138     // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3139     if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3140       if (Value *V =
3141               SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
3142                                C == LHS ? D : C, Q, MaxRecurse - 1))
3143         return V;
3144 
3145     // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3146     bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3147                        trySimplifyICmpWithAdds(Pred, LHS, RHS);
3148     if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3149       // Determine Y and Z in the form icmp (X+Y), (X+Z).
3150       Value *Y, *Z;
3151       if (A == C) {
3152         // C + B == C + D  ->  B == D
3153         Y = B;
3154         Z = D;
3155       } else if (A == D) {
3156         // D + B == C + D  ->  B == C
3157         Y = B;
3158         Z = C;
3159       } else if (B == C) {
3160         // A + C == C + D  ->  A == D
3161         Y = A;
3162         Z = D;
3163       } else {
3164         assert(B == D);
3165         // A + D == C + D  ->  A == C
3166         Y = A;
3167         Z = C;
3168       }
3169       if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3170         return V;
3171     }
3172   }
3173 
3174   if (LBO)
3175     if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3176       return V;
3177 
3178   if (RBO)
3179     if (Value *V = simplifyICmpWithBinOpOnLHS(
3180             ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3181       return V;
3182 
3183   // 0 - (zext X) pred C
3184   if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3185     const APInt *C;
3186     if (match(RHS, m_APInt(C))) {
3187       if (C->isStrictlyPositive()) {
3188         if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3189           return ConstantInt::getTrue(GetCompareTy(RHS));
3190         if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3191           return ConstantInt::getFalse(GetCompareTy(RHS));
3192       }
3193       if (C->isNonNegative()) {
3194         if (Pred == ICmpInst::ICMP_SLE)
3195           return ConstantInt::getTrue(GetCompareTy(RHS));
3196         if (Pred == ICmpInst::ICMP_SGT)
3197           return ConstantInt::getFalse(GetCompareTy(RHS));
3198       }
3199     }
3200   }
3201 
3202   //   If C2 is a power-of-2 and C is not:
3203   //   (C2 << X) == C --> false
3204   //   (C2 << X) != C --> true
3205   const APInt *C;
3206   if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3207       match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
3208     // C2 << X can equal zero in some circumstances.
3209     // This simplification might be unsafe if C is zero.
3210     //
3211     // We know it is safe if:
3212     // - The shift is nsw. We can't shift out the one bit.
3213     // - The shift is nuw. We can't shift out the one bit.
3214     // - C2 is one.
3215     // - C isn't zero.
3216     if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3217         Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3218         match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
3219       if (Pred == ICmpInst::ICMP_EQ)
3220         return ConstantInt::getFalse(GetCompareTy(RHS));
3221       if (Pred == ICmpInst::ICMP_NE)
3222         return ConstantInt::getTrue(GetCompareTy(RHS));
3223     }
3224   }
3225 
3226   // TODO: This is overly constrained. LHS can be any power-of-2.
3227   // (1 << X)  >u 0x8000 --> false
3228   // (1 << X) <=u 0x8000 --> true
3229   if (match(LHS, m_Shl(m_One(), m_Value())) && match(RHS, m_SignMask())) {
3230     if (Pred == ICmpInst::ICMP_UGT)
3231       return ConstantInt::getFalse(GetCompareTy(RHS));
3232     if (Pred == ICmpInst::ICMP_ULE)
3233       return ConstantInt::getTrue(GetCompareTy(RHS));
3234   }
3235 
3236   if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
3237       LBO->getOperand(1) == RBO->getOperand(1)) {
3238     switch (LBO->getOpcode()) {
3239     default:
3240       break;
3241     case Instruction::UDiv:
3242     case Instruction::LShr:
3243       if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3244           !Q.IIQ.isExact(RBO))
3245         break;
3246       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3247                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3248           return V;
3249       break;
3250     case Instruction::SDiv:
3251       if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3252           !Q.IIQ.isExact(RBO))
3253         break;
3254       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3255                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3256         return V;
3257       break;
3258     case Instruction::AShr:
3259       if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3260         break;
3261       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3262                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3263         return V;
3264       break;
3265     case Instruction::Shl: {
3266       bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3267       bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3268       if (!NUW && !NSW)
3269         break;
3270       if (!NSW && ICmpInst::isSigned(Pred))
3271         break;
3272       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3273                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3274         return V;
3275       break;
3276     }
3277     }
3278   }
3279   return nullptr;
3280 }
3281 
3282 /// Simplify integer comparisons where at least one operand of the compare
3283 /// matches an integer min/max idiom.
3284 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
3285                                      Value *RHS, const SimplifyQuery &Q,
3286                                      unsigned MaxRecurse) {
3287   Type *ITy = GetCompareTy(LHS); // The return type.
3288   Value *A, *B;
3289   CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3290   CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3291 
3292   // Signed variants on "max(a,b)>=a -> true".
3293   if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3294     if (A != RHS)
3295       std::swap(A, B);       // smax(A, B) pred A.
3296     EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3297     // We analyze this as smax(A, B) pred A.
3298     P = Pred;
3299   } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3300              (A == LHS || B == LHS)) {
3301     if (A != LHS)
3302       std::swap(A, B);       // A pred smax(A, B).
3303     EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3304     // We analyze this as smax(A, B) swapped-pred A.
3305     P = CmpInst::getSwappedPredicate(Pred);
3306   } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3307              (A == RHS || B == RHS)) {
3308     if (A != RHS)
3309       std::swap(A, B);       // smin(A, B) pred A.
3310     EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3311     // We analyze this as smax(-A, -B) swapped-pred -A.
3312     // Note that we do not need to actually form -A or -B thanks to EqP.
3313     P = CmpInst::getSwappedPredicate(Pred);
3314   } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3315              (A == LHS || B == LHS)) {
3316     if (A != LHS)
3317       std::swap(A, B);       // A pred smin(A, B).
3318     EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3319     // We analyze this as smax(-A, -B) pred -A.
3320     // Note that we do not need to actually form -A or -B thanks to EqP.
3321     P = Pred;
3322   }
3323   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3324     // Cases correspond to "max(A, B) p A".
3325     switch (P) {
3326     default:
3327       break;
3328     case CmpInst::ICMP_EQ:
3329     case CmpInst::ICMP_SLE:
3330       // Equivalent to "A EqP B".  This may be the same as the condition tested
3331       // in the max/min; if so, we can just return that.
3332       if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3333         return V;
3334       if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3335         return V;
3336       // Otherwise, see if "A EqP B" simplifies.
3337       if (MaxRecurse)
3338         if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3339           return V;
3340       break;
3341     case CmpInst::ICMP_NE:
3342     case CmpInst::ICMP_SGT: {
3343       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3344       // Equivalent to "A InvEqP B".  This may be the same as the condition
3345       // tested in the max/min; if so, we can just return that.
3346       if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3347         return V;
3348       if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3349         return V;
3350       // Otherwise, see if "A InvEqP B" simplifies.
3351       if (MaxRecurse)
3352         if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3353           return V;
3354       break;
3355     }
3356     case CmpInst::ICMP_SGE:
3357       // Always true.
3358       return getTrue(ITy);
3359     case CmpInst::ICMP_SLT:
3360       // Always false.
3361       return getFalse(ITy);
3362     }
3363   }
3364 
3365   // Unsigned variants on "max(a,b)>=a -> true".
3366   P = CmpInst::BAD_ICMP_PREDICATE;
3367   if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3368     if (A != RHS)
3369       std::swap(A, B);       // umax(A, B) pred A.
3370     EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3371     // We analyze this as umax(A, B) pred A.
3372     P = Pred;
3373   } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3374              (A == LHS || B == LHS)) {
3375     if (A != LHS)
3376       std::swap(A, B);       // A pred umax(A, B).
3377     EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3378     // We analyze this as umax(A, B) swapped-pred A.
3379     P = CmpInst::getSwappedPredicate(Pred);
3380   } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3381              (A == RHS || B == RHS)) {
3382     if (A != RHS)
3383       std::swap(A, B);       // umin(A, B) pred A.
3384     EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3385     // We analyze this as umax(-A, -B) swapped-pred -A.
3386     // Note that we do not need to actually form -A or -B thanks to EqP.
3387     P = CmpInst::getSwappedPredicate(Pred);
3388   } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3389              (A == LHS || B == LHS)) {
3390     if (A != LHS)
3391       std::swap(A, B);       // A pred umin(A, B).
3392     EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3393     // We analyze this as umax(-A, -B) pred -A.
3394     // Note that we do not need to actually form -A or -B thanks to EqP.
3395     P = Pred;
3396   }
3397   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3398     // Cases correspond to "max(A, B) p A".
3399     switch (P) {
3400     default:
3401       break;
3402     case CmpInst::ICMP_EQ:
3403     case CmpInst::ICMP_ULE:
3404       // Equivalent to "A EqP B".  This may be the same as the condition tested
3405       // in the max/min; if so, we can just return that.
3406       if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3407         return V;
3408       if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3409         return V;
3410       // Otherwise, see if "A EqP B" simplifies.
3411       if (MaxRecurse)
3412         if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3413           return V;
3414       break;
3415     case CmpInst::ICMP_NE:
3416     case CmpInst::ICMP_UGT: {
3417       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3418       // Equivalent to "A InvEqP B".  This may be the same as the condition
3419       // tested in the max/min; if so, we can just return that.
3420       if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3421         return V;
3422       if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3423         return V;
3424       // Otherwise, see if "A InvEqP B" simplifies.
3425       if (MaxRecurse)
3426         if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3427           return V;
3428       break;
3429     }
3430     case CmpInst::ICMP_UGE:
3431       return getTrue(ITy);
3432     case CmpInst::ICMP_ULT:
3433       return getFalse(ITy);
3434     }
3435   }
3436 
3437   // Comparing 1 each of min/max with a common operand?
3438   // Canonicalize min operand to RHS.
3439   if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3440       match(LHS, m_SMin(m_Value(), m_Value()))) {
3441     std::swap(LHS, RHS);
3442     Pred = ICmpInst::getSwappedPredicate(Pred);
3443   }
3444 
3445   Value *C, *D;
3446   if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3447       match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3448       (A == C || A == D || B == C || B == D)) {
3449     // smax(A, B) >=s smin(A, D) --> true
3450     if (Pred == CmpInst::ICMP_SGE)
3451       return getTrue(ITy);
3452     // smax(A, B) <s smin(A, D) --> false
3453     if (Pred == CmpInst::ICMP_SLT)
3454       return getFalse(ITy);
3455   } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3456              match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3457              (A == C || A == D || B == C || B == D)) {
3458     // umax(A, B) >=u umin(A, D) --> true
3459     if (Pred == CmpInst::ICMP_UGE)
3460       return getTrue(ITy);
3461     // umax(A, B) <u umin(A, D) --> false
3462     if (Pred == CmpInst::ICMP_ULT)
3463       return getFalse(ITy);
3464   }
3465 
3466   return nullptr;
3467 }
3468 
3469 static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate,
3470                                                Value *LHS, Value *RHS,
3471                                                const SimplifyQuery &Q) {
3472   // Gracefully handle instructions that have not been inserted yet.
3473   if (!Q.AC || !Q.CxtI || !Q.CxtI->getParent())
3474     return nullptr;
3475 
3476   for (Value *AssumeBaseOp : {LHS, RHS}) {
3477     for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3478       if (!AssumeVH)
3479         continue;
3480 
3481       CallInst *Assume = cast<CallInst>(AssumeVH);
3482       if (Optional<bool> Imp =
3483               isImpliedCondition(Assume->getArgOperand(0), Predicate, LHS, RHS,
3484                                  Q.DL))
3485         if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3486           return ConstantInt::get(GetCompareTy(LHS), *Imp);
3487     }
3488   }
3489 
3490   return nullptr;
3491 }
3492 
3493 /// Given operands for an ICmpInst, see if we can fold the result.
3494 /// If not, this returns null.
3495 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3496                                const SimplifyQuery &Q, unsigned MaxRecurse) {
3497   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3498   assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3499 
3500   if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3501     if (Constant *CRHS = dyn_cast<Constant>(RHS))
3502       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3503 
3504     // If we have a constant, make sure it is on the RHS.
3505     std::swap(LHS, RHS);
3506     Pred = CmpInst::getSwappedPredicate(Pred);
3507   }
3508   assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3509 
3510   Type *ITy = GetCompareTy(LHS); // The return type.
3511 
3512   // icmp poison, X -> poison
3513   if (isa<PoisonValue>(RHS))
3514     return PoisonValue::get(ITy);
3515 
3516   // For EQ and NE, we can always pick a value for the undef to make the
3517   // predicate pass or fail, so we can return undef.
3518   // Matches behavior in llvm::ConstantFoldCompareInstruction.
3519   if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3520     return UndefValue::get(ITy);
3521 
3522   // icmp X, X -> true/false
3523   // icmp X, undef -> true/false because undef could be X.
3524   if (LHS == RHS || Q.isUndefValue(RHS))
3525     return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3526 
3527   if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3528     return V;
3529 
3530   // TODO: Sink/common this with other potentially expensive calls that use
3531   //       ValueTracking? See comment below for isKnownNonEqual().
3532   if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3533     return V;
3534 
3535   if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3536     return V;
3537 
3538   // If both operands have range metadata, use the metadata
3539   // to simplify the comparison.
3540   if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3541     auto RHS_Instr = cast<Instruction>(RHS);
3542     auto LHS_Instr = cast<Instruction>(LHS);
3543 
3544     if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
3545         Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
3546       auto RHS_CR = getConstantRangeFromMetadata(
3547           *RHS_Instr->getMetadata(LLVMContext::MD_range));
3548       auto LHS_CR = getConstantRangeFromMetadata(
3549           *LHS_Instr->getMetadata(LLVMContext::MD_range));
3550 
3551       if (LHS_CR.icmp(Pred, RHS_CR))
3552         return ConstantInt::getTrue(RHS->getContext());
3553 
3554       if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
3555         return ConstantInt::getFalse(RHS->getContext());
3556     }
3557   }
3558 
3559   // Compare of cast, for example (zext X) != 0 -> X != 0
3560   if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3561     Instruction *LI = cast<CastInst>(LHS);
3562     Value *SrcOp = LI->getOperand(0);
3563     Type *SrcTy = SrcOp->getType();
3564     Type *DstTy = LI->getType();
3565 
3566     // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3567     // if the integer type is the same size as the pointer type.
3568     if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3569         Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3570       if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3571         // Transfer the cast to the constant.
3572         if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3573                                         ConstantExpr::getIntToPtr(RHSC, SrcTy),
3574                                         Q, MaxRecurse-1))
3575           return V;
3576       } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3577         if (RI->getOperand(0)->getType() == SrcTy)
3578           // Compare without the cast.
3579           if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3580                                           Q, MaxRecurse-1))
3581             return V;
3582       }
3583     }
3584 
3585     if (isa<ZExtInst>(LHS)) {
3586       // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3587       // same type.
3588       if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3589         if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3590           // Compare X and Y.  Note that signed predicates become unsigned.
3591           if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3592                                           SrcOp, RI->getOperand(0), Q,
3593                                           MaxRecurse-1))
3594             return V;
3595       }
3596       // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3597       else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3598         if (SrcOp == RI->getOperand(0)) {
3599           if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3600             return ConstantInt::getTrue(ITy);
3601           if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3602             return ConstantInt::getFalse(ITy);
3603         }
3604       }
3605       // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3606       // too.  If not, then try to deduce the result of the comparison.
3607       else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3608         // Compute the constant that would happen if we truncated to SrcTy then
3609         // reextended to DstTy.
3610         Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3611         Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3612 
3613         // If the re-extended constant didn't change then this is effectively
3614         // also a case of comparing two zero-extended values.
3615         if (RExt == CI && MaxRecurse)
3616           if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3617                                         SrcOp, Trunc, Q, MaxRecurse-1))
3618             return V;
3619 
3620         // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3621         // there.  Use this to work out the result of the comparison.
3622         if (RExt != CI) {
3623           switch (Pred) {
3624           default: llvm_unreachable("Unknown ICmp predicate!");
3625           // LHS <u RHS.
3626           case ICmpInst::ICMP_EQ:
3627           case ICmpInst::ICMP_UGT:
3628           case ICmpInst::ICMP_UGE:
3629             return ConstantInt::getFalse(CI->getContext());
3630 
3631           case ICmpInst::ICMP_NE:
3632           case ICmpInst::ICMP_ULT:
3633           case ICmpInst::ICMP_ULE:
3634             return ConstantInt::getTrue(CI->getContext());
3635 
3636           // LHS is non-negative.  If RHS is negative then LHS >s LHS.  If RHS
3637           // is non-negative then LHS <s RHS.
3638           case ICmpInst::ICMP_SGT:
3639           case ICmpInst::ICMP_SGE:
3640             return CI->getValue().isNegative() ?
3641               ConstantInt::getTrue(CI->getContext()) :
3642               ConstantInt::getFalse(CI->getContext());
3643 
3644           case ICmpInst::ICMP_SLT:
3645           case ICmpInst::ICMP_SLE:
3646             return CI->getValue().isNegative() ?
3647               ConstantInt::getFalse(CI->getContext()) :
3648               ConstantInt::getTrue(CI->getContext());
3649           }
3650         }
3651       }
3652     }
3653 
3654     if (isa<SExtInst>(LHS)) {
3655       // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3656       // same type.
3657       if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3658         if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3659           // Compare X and Y.  Note that the predicate does not change.
3660           if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3661                                           Q, MaxRecurse-1))
3662             return V;
3663       }
3664       // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3665       else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3666         if (SrcOp == RI->getOperand(0)) {
3667           if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3668             return ConstantInt::getTrue(ITy);
3669           if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3670             return ConstantInt::getFalse(ITy);
3671         }
3672       }
3673       // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3674       // too.  If not, then try to deduce the result of the comparison.
3675       else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3676         // Compute the constant that would happen if we truncated to SrcTy then
3677         // reextended to DstTy.
3678         Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3679         Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3680 
3681         // If the re-extended constant didn't change then this is effectively
3682         // also a case of comparing two sign-extended values.
3683         if (RExt == CI && MaxRecurse)
3684           if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3685             return V;
3686 
3687         // Otherwise the upper bits of LHS are all equal, while RHS has varying
3688         // bits there.  Use this to work out the result of the comparison.
3689         if (RExt != CI) {
3690           switch (Pred) {
3691           default: llvm_unreachable("Unknown ICmp predicate!");
3692           case ICmpInst::ICMP_EQ:
3693             return ConstantInt::getFalse(CI->getContext());
3694           case ICmpInst::ICMP_NE:
3695             return ConstantInt::getTrue(CI->getContext());
3696 
3697           // If RHS is non-negative then LHS <s RHS.  If RHS is negative then
3698           // LHS >s RHS.
3699           case ICmpInst::ICMP_SGT:
3700           case ICmpInst::ICMP_SGE:
3701             return CI->getValue().isNegative() ?
3702               ConstantInt::getTrue(CI->getContext()) :
3703               ConstantInt::getFalse(CI->getContext());
3704           case ICmpInst::ICMP_SLT:
3705           case ICmpInst::ICMP_SLE:
3706             return CI->getValue().isNegative() ?
3707               ConstantInt::getFalse(CI->getContext()) :
3708               ConstantInt::getTrue(CI->getContext());
3709 
3710           // If LHS is non-negative then LHS <u RHS.  If LHS is negative then
3711           // LHS >u RHS.
3712           case ICmpInst::ICMP_UGT:
3713           case ICmpInst::ICMP_UGE:
3714             // Comparison is true iff the LHS <s 0.
3715             if (MaxRecurse)
3716               if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3717                                               Constant::getNullValue(SrcTy),
3718                                               Q, MaxRecurse-1))
3719                 return V;
3720             break;
3721           case ICmpInst::ICMP_ULT:
3722           case ICmpInst::ICMP_ULE:
3723             // Comparison is true iff the LHS >=s 0.
3724             if (MaxRecurse)
3725               if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3726                                               Constant::getNullValue(SrcTy),
3727                                               Q, MaxRecurse-1))
3728                 return V;
3729             break;
3730           }
3731         }
3732       }
3733     }
3734   }
3735 
3736   // icmp eq|ne X, Y -> false|true if X != Y
3737   // This is potentially expensive, and we have already computedKnownBits for
3738   // compares with 0 above here, so only try this for a non-zero compare.
3739   if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
3740       isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
3741     return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3742   }
3743 
3744   if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3745     return V;
3746 
3747   if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3748     return V;
3749 
3750   if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
3751     return V;
3752 
3753   // Simplify comparisons of related pointers using a powerful, recursive
3754   // GEP-walk when we have target data available..
3755   if (LHS->getType()->isPointerTy())
3756     if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
3757       return C;
3758   if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3759     if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3760       if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3761               Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3762           Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3763               Q.DL.getTypeSizeInBits(CRHS->getType()))
3764         if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
3765                                          CRHS->getPointerOperand(), Q))
3766           return C;
3767 
3768   // If the comparison is with the result of a select instruction, check whether
3769   // comparing with either branch of the select always yields the same value.
3770   if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3771     if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3772       return V;
3773 
3774   // If the comparison is with the result of a phi instruction, check whether
3775   // doing the compare with each incoming phi value yields a common result.
3776   if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3777     if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3778       return V;
3779 
3780   return nullptr;
3781 }
3782 
3783 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3784                               const SimplifyQuery &Q) {
3785   return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
3786 }
3787 
3788 /// Given operands for an FCmpInst, see if we can fold the result.
3789 /// If not, this returns null.
3790 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3791                                FastMathFlags FMF, const SimplifyQuery &Q,
3792                                unsigned MaxRecurse) {
3793   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3794   assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3795 
3796   if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3797     if (Constant *CRHS = dyn_cast<Constant>(RHS))
3798       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3799 
3800     // If we have a constant, make sure it is on the RHS.
3801     std::swap(LHS, RHS);
3802     Pred = CmpInst::getSwappedPredicate(Pred);
3803   }
3804 
3805   // Fold trivial predicates.
3806   Type *RetTy = GetCompareTy(LHS);
3807   if (Pred == FCmpInst::FCMP_FALSE)
3808     return getFalse(RetTy);
3809   if (Pred == FCmpInst::FCMP_TRUE)
3810     return getTrue(RetTy);
3811 
3812   // Fold (un)ordered comparison if we can determine there are no NaNs.
3813   if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD)
3814     if (FMF.noNaNs() ||
3815         (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI)))
3816       return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
3817 
3818   // NaN is unordered; NaN is not ordered.
3819   assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
3820          "Comparison must be either ordered or unordered");
3821   if (match(RHS, m_NaN()))
3822     return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3823 
3824   // fcmp pred x, poison and  fcmp pred poison, x
3825   // fold to poison
3826   if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
3827     return PoisonValue::get(RetTy);
3828 
3829   // fcmp pred x, undef  and  fcmp pred undef, x
3830   // fold to true if unordered, false if ordered
3831   if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
3832     // Choosing NaN for the undef will always make unordered comparison succeed
3833     // and ordered comparison fail.
3834     return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3835   }
3836 
3837   // fcmp x,x -> true/false.  Not all compares are foldable.
3838   if (LHS == RHS) {
3839     if (CmpInst::isTrueWhenEqual(Pred))
3840       return getTrue(RetTy);
3841     if (CmpInst::isFalseWhenEqual(Pred))
3842       return getFalse(RetTy);
3843   }
3844 
3845   // Handle fcmp with constant RHS.
3846   // TODO: Use match with a specific FP value, so these work with vectors with
3847   // undef lanes.
3848   const APFloat *C;
3849   if (match(RHS, m_APFloat(C))) {
3850     // Check whether the constant is an infinity.
3851     if (C->isInfinity()) {
3852       if (C->isNegative()) {
3853         switch (Pred) {
3854         case FCmpInst::FCMP_OLT:
3855           // No value is ordered and less than negative infinity.
3856           return getFalse(RetTy);
3857         case FCmpInst::FCMP_UGE:
3858           // All values are unordered with or at least negative infinity.
3859           return getTrue(RetTy);
3860         default:
3861           break;
3862         }
3863       } else {
3864         switch (Pred) {
3865         case FCmpInst::FCMP_OGT:
3866           // No value is ordered and greater than infinity.
3867           return getFalse(RetTy);
3868         case FCmpInst::FCMP_ULE:
3869           // All values are unordered with and at most infinity.
3870           return getTrue(RetTy);
3871         default:
3872           break;
3873         }
3874       }
3875 
3876       // LHS == Inf
3877       if (Pred == FCmpInst::FCMP_OEQ && isKnownNeverInfinity(LHS, Q.TLI))
3878         return getFalse(RetTy);
3879       // LHS != Inf
3880       if (Pred == FCmpInst::FCMP_UNE && isKnownNeverInfinity(LHS, Q.TLI))
3881         return getTrue(RetTy);
3882       // LHS == Inf || LHS == NaN
3883       if (Pred == FCmpInst::FCMP_UEQ && isKnownNeverInfinity(LHS, Q.TLI) &&
3884           isKnownNeverNaN(LHS, Q.TLI))
3885         return getFalse(RetTy);
3886       // LHS != Inf && LHS != NaN
3887       if (Pred == FCmpInst::FCMP_ONE && isKnownNeverInfinity(LHS, Q.TLI) &&
3888           isKnownNeverNaN(LHS, Q.TLI))
3889         return getTrue(RetTy);
3890     }
3891     if (C->isNegative() && !C->isNegZero()) {
3892       assert(!C->isNaN() && "Unexpected NaN constant!");
3893       // TODO: We can catch more cases by using a range check rather than
3894       //       relying on CannotBeOrderedLessThanZero.
3895       switch (Pred) {
3896       case FCmpInst::FCMP_UGE:
3897       case FCmpInst::FCMP_UGT:
3898       case FCmpInst::FCMP_UNE:
3899         // (X >= 0) implies (X > C) when (C < 0)
3900         if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3901           return getTrue(RetTy);
3902         break;
3903       case FCmpInst::FCMP_OEQ:
3904       case FCmpInst::FCMP_OLE:
3905       case FCmpInst::FCMP_OLT:
3906         // (X >= 0) implies !(X < C) when (C < 0)
3907         if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3908           return getFalse(RetTy);
3909         break;
3910       default:
3911         break;
3912       }
3913     }
3914 
3915     // Check comparison of [minnum/maxnum with constant] with other constant.
3916     const APFloat *C2;
3917     if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
3918          *C2 < *C) ||
3919         (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
3920          *C2 > *C)) {
3921       bool IsMaxNum =
3922           cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
3923       // The ordered relationship and minnum/maxnum guarantee that we do not
3924       // have NaN constants, so ordered/unordered preds are handled the same.
3925       switch (Pred) {
3926       case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_UEQ:
3927         // minnum(X, LesserC)  == C --> false
3928         // maxnum(X, GreaterC) == C --> false
3929         return getFalse(RetTy);
3930       case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_UNE:
3931         // minnum(X, LesserC)  != C --> true
3932         // maxnum(X, GreaterC) != C --> true
3933         return getTrue(RetTy);
3934       case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_UGE:
3935       case FCmpInst::FCMP_OGT: case FCmpInst::FCMP_UGT:
3936         // minnum(X, LesserC)  >= C --> false
3937         // minnum(X, LesserC)  >  C --> false
3938         // maxnum(X, GreaterC) >= C --> true
3939         // maxnum(X, GreaterC) >  C --> true
3940         return ConstantInt::get(RetTy, IsMaxNum);
3941       case FCmpInst::FCMP_OLE: case FCmpInst::FCMP_ULE:
3942       case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_ULT:
3943         // minnum(X, LesserC)  <= C --> true
3944         // minnum(X, LesserC)  <  C --> true
3945         // maxnum(X, GreaterC) <= C --> false
3946         // maxnum(X, GreaterC) <  C --> false
3947         return ConstantInt::get(RetTy, !IsMaxNum);
3948       default:
3949         // TRUE/FALSE/ORD/UNO should be handled before this.
3950         llvm_unreachable("Unexpected fcmp predicate");
3951       }
3952     }
3953   }
3954 
3955   if (match(RHS, m_AnyZeroFP())) {
3956     switch (Pred) {
3957     case FCmpInst::FCMP_OGE:
3958     case FCmpInst::FCMP_ULT:
3959       // Positive or zero X >= 0.0 --> true
3960       // Positive or zero X <  0.0 --> false
3961       if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) &&
3962           CannotBeOrderedLessThanZero(LHS, Q.TLI))
3963         return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
3964       break;
3965     case FCmpInst::FCMP_UGE:
3966     case FCmpInst::FCMP_OLT:
3967       // Positive or zero or nan X >= 0.0 --> true
3968       // Positive or zero or nan X <  0.0 --> false
3969       if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3970         return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
3971       break;
3972     default:
3973       break;
3974     }
3975   }
3976 
3977   // If the comparison is with the result of a select instruction, check whether
3978   // comparing with either branch of the select always yields the same value.
3979   if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3980     if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3981       return V;
3982 
3983   // If the comparison is with the result of a phi instruction, check whether
3984   // doing the compare with each incoming phi value yields a common result.
3985   if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3986     if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3987       return V;
3988 
3989   return nullptr;
3990 }
3991 
3992 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3993                               FastMathFlags FMF, const SimplifyQuery &Q) {
3994   return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
3995 }
3996 
3997 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3998                                      const SimplifyQuery &Q,
3999                                      bool AllowRefinement,
4000                                      unsigned MaxRecurse) {
4001   assert(!Op->getType()->isVectorTy() && "This is not safe for vectors");
4002 
4003   // Trivial replacement.
4004   if (V == Op)
4005     return RepOp;
4006 
4007   // We cannot replace a constant, and shouldn't even try.
4008   if (isa<Constant>(Op))
4009     return nullptr;
4010 
4011   auto *I = dyn_cast<Instruction>(V);
4012   if (!I || !is_contained(I->operands(), Op))
4013     return nullptr;
4014 
4015   // Replace Op with RepOp in instruction operands.
4016   SmallVector<Value *, 8> NewOps(I->getNumOperands());
4017   transform(I->operands(), NewOps.begin(),
4018             [&](Value *V) { return V == Op ? RepOp : V; });
4019 
4020   if (!AllowRefinement) {
4021     // General InstSimplify functions may refine the result, e.g. by returning
4022     // a constant for a potentially poison value. To avoid this, implement only
4023     // a few non-refining but profitable transforms here.
4024 
4025     if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4026       unsigned Opcode = BO->getOpcode();
4027       // id op x -> x, x op id -> x
4028       if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
4029         return NewOps[1];
4030       if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
4031                                                       /* RHS */ true))
4032         return NewOps[0];
4033 
4034       // x & x -> x, x | x -> x
4035       if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4036           NewOps[0] == NewOps[1])
4037         return NewOps[0];
4038     }
4039 
4040     if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4041       // getelementptr x, 0 -> x
4042       if (NewOps.size() == 2 && match(NewOps[1], m_Zero()) &&
4043           !GEP->isInBounds())
4044         return NewOps[0];
4045     }
4046   } else if (MaxRecurse) {
4047     // The simplification queries below may return the original value. Consider:
4048     //   %div = udiv i32 %arg, %arg2
4049     //   %mul = mul nsw i32 %div, %arg2
4050     //   %cmp = icmp eq i32 %mul, %arg
4051     //   %sel = select i1 %cmp, i32 %div, i32 undef
4052     // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4053     // simplifies back to %arg. This can only happen because %mul does not
4054     // dominate %div. To ensure a consistent return value contract, we make sure
4055     // that this case returns nullptr as well.
4056     auto PreventSelfSimplify = [V](Value *Simplified) {
4057       return Simplified != V ? Simplified : nullptr;
4058     };
4059 
4060     if (auto *B = dyn_cast<BinaryOperator>(I))
4061       return PreventSelfSimplify(SimplifyBinOp(B->getOpcode(), NewOps[0],
4062                                                NewOps[1], Q, MaxRecurse - 1));
4063 
4064     if (CmpInst *C = dyn_cast<CmpInst>(I))
4065       return PreventSelfSimplify(SimplifyCmpInst(C->getPredicate(), NewOps[0],
4066                                                  NewOps[1], Q, MaxRecurse - 1));
4067 
4068     if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
4069       return PreventSelfSimplify(SimplifyGEPInst(
4070           GEP->getSourceElementType(), NewOps[0], makeArrayRef(NewOps).slice(1),
4071           GEP->isInBounds(), Q, MaxRecurse - 1));
4072 
4073     if (isa<SelectInst>(I))
4074       return PreventSelfSimplify(
4075           SimplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q,
4076                              MaxRecurse - 1));
4077     // TODO: We could hand off more cases to instsimplify here.
4078   }
4079 
4080   // If all operands are constant after substituting Op for RepOp then we can
4081   // constant fold the instruction.
4082   SmallVector<Constant *, 8> ConstOps;
4083   for (Value *NewOp : NewOps) {
4084     if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4085       ConstOps.push_back(ConstOp);
4086     else
4087       return nullptr;
4088   }
4089 
4090   // Consider:
4091   //   %cmp = icmp eq i32 %x, 2147483647
4092   //   %add = add nsw i32 %x, 1
4093   //   %sel = select i1 %cmp, i32 -2147483648, i32 %add
4094   //
4095   // We can't replace %sel with %add unless we strip away the flags (which
4096   // will be done in InstCombine).
4097   // TODO: This may be unsound, because it only catches some forms of
4098   // refinement.
4099   if (!AllowRefinement && canCreatePoison(cast<Operator>(I)))
4100     return nullptr;
4101 
4102   if (CmpInst *C = dyn_cast<CmpInst>(I))
4103     return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
4104                                            ConstOps[1], Q.DL, Q.TLI);
4105 
4106   if (LoadInst *LI = dyn_cast<LoadInst>(I))
4107     if (!LI->isVolatile())
4108       return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
4109 
4110   return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
4111 }
4112 
4113 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4114                                     const SimplifyQuery &Q,
4115                                     bool AllowRefinement) {
4116   return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement,
4117                                   RecursionLimit);
4118 }
4119 
4120 /// Try to simplify a select instruction when its condition operand is an
4121 /// integer comparison where one operand of the compare is a constant.
4122 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4123                                     const APInt *Y, bool TrueWhenUnset) {
4124   const APInt *C;
4125 
4126   // (X & Y) == 0 ? X & ~Y : X  --> X
4127   // (X & Y) != 0 ? X & ~Y : X  --> X & ~Y
4128   if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4129       *Y == ~*C)
4130     return TrueWhenUnset ? FalseVal : TrueVal;
4131 
4132   // (X & Y) == 0 ? X : X & ~Y  --> X & ~Y
4133   // (X & Y) != 0 ? X : X & ~Y  --> X
4134   if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4135       *Y == ~*C)
4136     return TrueWhenUnset ? FalseVal : TrueVal;
4137 
4138   if (Y->isPowerOf2()) {
4139     // (X & Y) == 0 ? X | Y : X  --> X | Y
4140     // (X & Y) != 0 ? X | Y : X  --> X
4141     if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4142         *Y == *C)
4143       return TrueWhenUnset ? TrueVal : FalseVal;
4144 
4145     // (X & Y) == 0 ? X : X | Y  --> X
4146     // (X & Y) != 0 ? X : X | Y  --> X | Y
4147     if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4148         *Y == *C)
4149       return TrueWhenUnset ? TrueVal : FalseVal;
4150   }
4151 
4152   return nullptr;
4153 }
4154 
4155 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
4156 /// eq/ne.
4157 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
4158                                            ICmpInst::Predicate Pred,
4159                                            Value *TrueVal, Value *FalseVal) {
4160   Value *X;
4161   APInt Mask;
4162   if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
4163     return nullptr;
4164 
4165   return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
4166                                Pred == ICmpInst::ICMP_EQ);
4167 }
4168 
4169 /// Try to simplify a select instruction when its condition operand is an
4170 /// integer comparison.
4171 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4172                                          Value *FalseVal, const SimplifyQuery &Q,
4173                                          unsigned MaxRecurse) {
4174   ICmpInst::Predicate Pred;
4175   Value *CmpLHS, *CmpRHS;
4176   if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4177     return nullptr;
4178 
4179   // Canonicalize ne to eq predicate.
4180   if (Pred == ICmpInst::ICMP_NE) {
4181     Pred = ICmpInst::ICMP_EQ;
4182     std::swap(TrueVal, FalseVal);
4183   }
4184 
4185   // Check for integer min/max with a limit constant:
4186   // X > MIN_INT ? X : MIN_INT --> X
4187   // X < MAX_INT ? X : MAX_INT --> X
4188   if (TrueVal->getType()->isIntOrIntVectorTy()) {
4189     Value *X, *Y;
4190     SelectPatternFlavor SPF =
4191         matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4192                                      X, Y).Flavor;
4193     if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4194       APInt LimitC = getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4195                                     X->getType()->getScalarSizeInBits());
4196       if (match(Y, m_SpecificInt(LimitC)))
4197         return X;
4198     }
4199   }
4200 
4201   if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4202     Value *X;
4203     const APInt *Y;
4204     if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4205       if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4206                                            /*TrueWhenUnset=*/true))
4207         return V;
4208 
4209     // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4210     Value *ShAmt;
4211     auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4212                              m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4213     // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4214     // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4215     if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4216       return X;
4217 
4218     // Test for a zero-shift-guard-op around rotates. These are used to
4219     // avoid UB from oversized shifts in raw IR rotate patterns, but the
4220     // intrinsics do not have that problem.
4221     // We do not allow this transform for the general funnel shift case because
4222     // that would not preserve the poison safety of the original code.
4223     auto isRotate =
4224         m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4225                     m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4226     // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4227     // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4228     if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4229         Pred == ICmpInst::ICMP_EQ)
4230       return FalseVal;
4231 
4232     // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4233     // X == 0 ? -abs(X) : abs(X) --> abs(X)
4234     if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4235         match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4236       return FalseVal;
4237     if (match(TrueVal,
4238               m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4239         match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4240       return FalseVal;
4241   }
4242 
4243   // Check for other compares that behave like bit test.
4244   if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred,
4245                                               TrueVal, FalseVal))
4246     return V;
4247 
4248   // If we have a scalar equality comparison, then we know the value in one of
4249   // the arms of the select. See if substituting this value into the arm and
4250   // simplifying the result yields the same value as the other arm.
4251   // Note that the equivalence/replacement opportunity does not hold for vectors
4252   // because each element of a vector select is chosen independently.
4253   if (Pred == ICmpInst::ICMP_EQ && !CondVal->getType()->isVectorTy()) {
4254     if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
4255                                /* AllowRefinement */ false, MaxRecurse) ==
4256             TrueVal ||
4257         simplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q,
4258                                /* AllowRefinement */ false, MaxRecurse) ==
4259             TrueVal)
4260       return FalseVal;
4261     if (simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4262                                /* AllowRefinement */ true, MaxRecurse) ==
4263             FalseVal ||
4264         simplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q,
4265                                /* AllowRefinement */ true, MaxRecurse) ==
4266             FalseVal)
4267       return FalseVal;
4268   }
4269 
4270   return nullptr;
4271 }
4272 
4273 /// Try to simplify a select instruction when its condition operand is a
4274 /// floating-point comparison.
4275 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4276                                      const SimplifyQuery &Q) {
4277   FCmpInst::Predicate Pred;
4278   if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
4279       !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
4280     return nullptr;
4281 
4282   // This transform is safe if we do not have (do not care about) -0.0 or if
4283   // at least one operand is known to not be -0.0. Otherwise, the select can
4284   // change the sign of a zero operand.
4285   bool HasNoSignedZeros = Q.CxtI && isa<FPMathOperator>(Q.CxtI) &&
4286                           Q.CxtI->hasNoSignedZeros();
4287   const APFloat *C;
4288   if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) ||
4289                           (match(F, m_APFloat(C)) && C->isNonZero())) {
4290     // (T == F) ? T : F --> F
4291     // (F == T) ? T : F --> F
4292     if (Pred == FCmpInst::FCMP_OEQ)
4293       return F;
4294 
4295     // (T != F) ? T : F --> T
4296     // (F != T) ? T : F --> T
4297     if (Pred == FCmpInst::FCMP_UNE)
4298       return T;
4299   }
4300 
4301   return nullptr;
4302 }
4303 
4304 /// Given operands for a SelectInst, see if we can fold the result.
4305 /// If not, this returns null.
4306 static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4307                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
4308   if (auto *CondC = dyn_cast<Constant>(Cond)) {
4309     if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4310       if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4311         return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);
4312 
4313     // select poison, X, Y -> poison
4314     if (isa<PoisonValue>(CondC))
4315       return PoisonValue::get(TrueVal->getType());
4316 
4317     // select undef, X, Y -> X or Y
4318     if (Q.isUndefValue(CondC))
4319       return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4320 
4321     // select true,  X, Y --> X
4322     // select false, X, Y --> Y
4323     // For vectors, allow undef/poison elements in the condition to match the
4324     // defined elements, so we can eliminate the select.
4325     if (match(CondC, m_One()))
4326       return TrueVal;
4327     if (match(CondC, m_Zero()))
4328       return FalseVal;
4329   }
4330 
4331   assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4332          "Select must have bool or bool vector condition");
4333   assert(TrueVal->getType() == FalseVal->getType() &&
4334          "Select must have same types for true/false ops");
4335 
4336   if (Cond->getType() == TrueVal->getType()) {
4337     // select i1 Cond, i1 true, i1 false --> i1 Cond
4338     if (match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4339       return Cond;
4340 
4341     // (X || Y) && (X || !Y) --> X (commuted 8 ways)
4342     Value *X, *Y;
4343     if (match(FalseVal, m_ZeroInt())) {
4344       if (match(Cond, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4345           match(TrueVal, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4346         return X;
4347       if (match(TrueVal, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4348           match(Cond, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4349         return X;
4350     }
4351   }
4352 
4353   // select ?, X, X -> X
4354   if (TrueVal == FalseVal)
4355     return TrueVal;
4356 
4357   // If the true or false value is poison, we can fold to the other value.
4358   // If the true or false value is undef, we can fold to the other value as
4359   // long as the other value isn't poison.
4360   // select ?, poison, X -> X
4361   // select ?, undef,  X -> X
4362   if (isa<PoisonValue>(TrueVal) ||
4363       (Q.isUndefValue(TrueVal) &&
4364        isGuaranteedNotToBePoison(FalseVal, Q.AC, Q.CxtI, Q.DT)))
4365     return FalseVal;
4366   // select ?, X, poison -> X
4367   // select ?, X, undef  -> X
4368   if (isa<PoisonValue>(FalseVal) ||
4369       (Q.isUndefValue(FalseVal) &&
4370        isGuaranteedNotToBePoison(TrueVal, Q.AC, Q.CxtI, Q.DT)))
4371     return TrueVal;
4372 
4373   // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
4374   Constant *TrueC, *FalseC;
4375   if (isa<FixedVectorType>(TrueVal->getType()) &&
4376       match(TrueVal, m_Constant(TrueC)) &&
4377       match(FalseVal, m_Constant(FalseC))) {
4378     unsigned NumElts =
4379         cast<FixedVectorType>(TrueC->getType())->getNumElements();
4380     SmallVector<Constant *, 16> NewC;
4381     for (unsigned i = 0; i != NumElts; ++i) {
4382       // Bail out on incomplete vector constants.
4383       Constant *TEltC = TrueC->getAggregateElement(i);
4384       Constant *FEltC = FalseC->getAggregateElement(i);
4385       if (!TEltC || !FEltC)
4386         break;
4387 
4388       // If the elements match (undef or not), that value is the result. If only
4389       // one element is undef, choose the defined element as the safe result.
4390       if (TEltC == FEltC)
4391         NewC.push_back(TEltC);
4392       else if (isa<PoisonValue>(TEltC) ||
4393                (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
4394         NewC.push_back(FEltC);
4395       else if (isa<PoisonValue>(FEltC) ||
4396                (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
4397         NewC.push_back(TEltC);
4398       else
4399         break;
4400     }
4401     if (NewC.size() == NumElts)
4402       return ConstantVector::get(NewC);
4403   }
4404 
4405   if (Value *V =
4406           simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4407     return V;
4408 
4409   if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
4410     return V;
4411 
4412   if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
4413     return V;
4414 
4415   Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4416   if (Imp)
4417     return *Imp ? TrueVal : FalseVal;
4418 
4419   return nullptr;
4420 }
4421 
4422 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4423                                 const SimplifyQuery &Q) {
4424   return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
4425 }
4426 
4427 /// Given operands for an GetElementPtrInst, see if we can fold the result.
4428 /// If not, this returns null.
4429 static Value *SimplifyGEPInst(Type *SrcTy, Value *Ptr,
4430                               ArrayRef<Value *> Indices, bool InBounds,
4431                               const SimplifyQuery &Q, unsigned) {
4432   // The type of the GEP pointer operand.
4433   unsigned AS =
4434       cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
4435 
4436   // getelementptr P -> P.
4437   if (Indices.empty())
4438     return Ptr;
4439 
4440   // Compute the (pointer) type returned by the GEP instruction.
4441   Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
4442   Type *GEPTy = PointerType::get(LastType, AS);
4443   if (VectorType *VT = dyn_cast<VectorType>(Ptr->getType()))
4444     GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4445   else {
4446     for (Value *Op : Indices) {
4447       // If one of the operands is a vector, the result type is a vector of
4448       // pointers. All vector operands must have the same number of elements.
4449       if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
4450         GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4451         break;
4452       }
4453     }
4454   }
4455 
4456   // For opaque pointers an all-zero GEP is a no-op. For typed pointers,
4457   // it may be equivalent to a bitcast.
4458   if (Ptr->getType()->isOpaquePointerTy() &&
4459       all_of(Indices, [](const auto *V) { return match(V, m_Zero()); }))
4460     return Ptr;
4461 
4462   // getelementptr poison, idx -> poison
4463   // getelementptr baseptr, poison -> poison
4464   if (isa<PoisonValue>(Ptr) ||
4465       any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
4466     return PoisonValue::get(GEPTy);
4467 
4468   if (Q.isUndefValue(Ptr))
4469     // If inbounds, we can choose an out-of-bounds pointer as a base pointer.
4470     return InBounds ? PoisonValue::get(GEPTy) : UndefValue::get(GEPTy);
4471 
4472   bool IsScalableVec =
4473       isa<ScalableVectorType>(SrcTy) || any_of(Indices, [](const Value *V) {
4474         return isa<ScalableVectorType>(V->getType());
4475       });
4476 
4477   if (Indices.size() == 1) {
4478     // getelementptr P, 0 -> P.
4479     if (match(Indices[0], m_Zero()) && Ptr->getType() == GEPTy)
4480       return Ptr;
4481 
4482     Type *Ty = SrcTy;
4483     if (!IsScalableVec && Ty->isSized()) {
4484       Value *P;
4485       uint64_t C;
4486       uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
4487       // getelementptr P, N -> P if P points to a type of zero size.
4488       if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
4489         return Ptr;
4490 
4491       // The following transforms are only safe if the ptrtoint cast
4492       // doesn't truncate the pointers.
4493       if (Indices[0]->getType()->getScalarSizeInBits() ==
4494           Q.DL.getPointerSizeInBits(AS)) {
4495         auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
4496           return P->getType() == GEPTy &&
4497                  getUnderlyingObject(P) == getUnderlyingObject(Ptr);
4498         };
4499         // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
4500         if (TyAllocSize == 1 &&
4501             match(Indices[0],
4502                   m_Sub(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Specific(Ptr)))) &&
4503             CanSimplify())
4504           return P;
4505 
4506         // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
4507         // size 1 << C.
4508         if (match(Indices[0], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
4509                                            m_PtrToInt(m_Specific(Ptr))),
4510                                      m_ConstantInt(C))) &&
4511             TyAllocSize == 1ULL << C && CanSimplify())
4512           return P;
4513 
4514         // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
4515         // size C.
4516         if (match(Indices[0], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
4517                                            m_PtrToInt(m_Specific(Ptr))),
4518                                      m_SpecificInt(TyAllocSize))) &&
4519             CanSimplify())
4520           return P;
4521       }
4522     }
4523   }
4524 
4525   if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
4526       all_of(Indices.drop_back(1),
4527              [](Value *Idx) { return match(Idx, m_Zero()); })) {
4528     unsigned IdxWidth =
4529         Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
4530     if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
4531       APInt BasePtrOffset(IdxWidth, 0);
4532       Value *StrippedBasePtr =
4533           Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
4534 
4535       // Avoid creating inttoptr of zero here: While LLVMs treatment of
4536       // inttoptr is generally conservative, this particular case is folded to
4537       // a null pointer, which will have incorrect provenance.
4538 
4539       // gep (gep V, C), (sub 0, V) -> C
4540       if (match(Indices.back(),
4541                 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
4542           !BasePtrOffset.isZero()) {
4543         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
4544         return ConstantExpr::getIntToPtr(CI, GEPTy);
4545       }
4546       // gep (gep V, C), (xor V, -1) -> C-1
4547       if (match(Indices.back(),
4548                 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
4549           !BasePtrOffset.isOne()) {
4550         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
4551         return ConstantExpr::getIntToPtr(CI, GEPTy);
4552       }
4553     }
4554   }
4555 
4556   // Check to see if this is constant foldable.
4557   if (!isa<Constant>(Ptr) ||
4558       !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
4559     return nullptr;
4560 
4561   auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices,
4562                                             InBounds);
4563   return ConstantFoldConstant(CE, Q.DL);
4564 }
4565 
4566 Value *llvm::SimplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
4567                              bool InBounds, const SimplifyQuery &Q) {
4568   return ::SimplifyGEPInst(SrcTy, Ptr, Indices, InBounds, Q, RecursionLimit);
4569 }
4570 
4571 /// Given operands for an InsertValueInst, see if we can fold the result.
4572 /// If not, this returns null.
4573 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
4574                                       ArrayRef<unsigned> Idxs, const SimplifyQuery &Q,
4575                                       unsigned) {
4576   if (Constant *CAgg = dyn_cast<Constant>(Agg))
4577     if (Constant *CVal = dyn_cast<Constant>(Val))
4578       return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
4579 
4580   // insertvalue x, undef, n -> x
4581   if (Q.isUndefValue(Val))
4582     return Agg;
4583 
4584   // insertvalue x, (extractvalue y, n), n
4585   if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
4586     if (EV->getAggregateOperand()->getType() == Agg->getType() &&
4587         EV->getIndices() == Idxs) {
4588       // insertvalue undef, (extractvalue y, n), n -> y
4589       if (Q.isUndefValue(Agg))
4590         return EV->getAggregateOperand();
4591 
4592       // insertvalue y, (extractvalue y, n), n -> y
4593       if (Agg == EV->getAggregateOperand())
4594         return Agg;
4595     }
4596 
4597   return nullptr;
4598 }
4599 
4600 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
4601                                      ArrayRef<unsigned> Idxs,
4602                                      const SimplifyQuery &Q) {
4603   return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
4604 }
4605 
4606 Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
4607                                        const SimplifyQuery &Q) {
4608   // Try to constant fold.
4609   auto *VecC = dyn_cast<Constant>(Vec);
4610   auto *ValC = dyn_cast<Constant>(Val);
4611   auto *IdxC = dyn_cast<Constant>(Idx);
4612   if (VecC && ValC && IdxC)
4613     return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
4614 
4615   // For fixed-length vector, fold into poison if index is out of bounds.
4616   if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
4617     if (isa<FixedVectorType>(Vec->getType()) &&
4618         CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
4619       return PoisonValue::get(Vec->getType());
4620   }
4621 
4622   // If index is undef, it might be out of bounds (see above case)
4623   if (Q.isUndefValue(Idx))
4624     return PoisonValue::get(Vec->getType());
4625 
4626   // If the scalar is poison, or it is undef and there is no risk of
4627   // propagating poison from the vector value, simplify to the vector value.
4628   if (isa<PoisonValue>(Val) ||
4629       (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
4630     return Vec;
4631 
4632   // If we are extracting a value from a vector, then inserting it into the same
4633   // place, that's the input vector:
4634   // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
4635   if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
4636     return Vec;
4637 
4638   return nullptr;
4639 }
4640 
4641 /// Given operands for an ExtractValueInst, see if we can fold the result.
4642 /// If not, this returns null.
4643 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4644                                        const SimplifyQuery &, unsigned) {
4645   if (auto *CAgg = dyn_cast<Constant>(Agg))
4646     return ConstantFoldExtractValueInstruction(CAgg, Idxs);
4647 
4648   // extractvalue x, (insertvalue y, elt, n), n -> elt
4649   unsigned NumIdxs = Idxs.size();
4650   for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
4651        IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
4652     ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
4653     unsigned NumInsertValueIdxs = InsertValueIdxs.size();
4654     unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
4655     if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
4656         Idxs.slice(0, NumCommonIdxs)) {
4657       if (NumIdxs == NumInsertValueIdxs)
4658         return IVI->getInsertedValueOperand();
4659       break;
4660     }
4661   }
4662 
4663   return nullptr;
4664 }
4665 
4666 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4667                                       const SimplifyQuery &Q) {
4668   return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
4669 }
4670 
4671 /// Given operands for an ExtractElementInst, see if we can fold the result.
4672 /// If not, this returns null.
4673 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
4674                                          const SimplifyQuery &Q, unsigned) {
4675   auto *VecVTy = cast<VectorType>(Vec->getType());
4676   if (auto *CVec = dyn_cast<Constant>(Vec)) {
4677     if (auto *CIdx = dyn_cast<Constant>(Idx))
4678       return ConstantExpr::getExtractElement(CVec, CIdx);
4679 
4680     if (Q.isUndefValue(Vec))
4681       return UndefValue::get(VecVTy->getElementType());
4682   }
4683 
4684   // An undef extract index can be arbitrarily chosen to be an out-of-range
4685   // index value, which would result in the instruction being poison.
4686   if (Q.isUndefValue(Idx))
4687     return PoisonValue::get(VecVTy->getElementType());
4688 
4689   // If extracting a specified index from the vector, see if we can recursively
4690   // find a previously computed scalar that was inserted into the vector.
4691   if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
4692     // For fixed-length vector, fold into undef if index is out of bounds.
4693     unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
4694     if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
4695       return PoisonValue::get(VecVTy->getElementType());
4696     // Handle case where an element is extracted from a splat.
4697     if (IdxC->getValue().ult(MinNumElts))
4698       if (auto *Splat = getSplatValue(Vec))
4699         return Splat;
4700     if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
4701       return Elt;
4702   } else {
4703     // The index is not relevant if our vector is a splat.
4704     if (Value *Splat = getSplatValue(Vec))
4705       return Splat;
4706   }
4707   return nullptr;
4708 }
4709 
4710 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
4711                                         const SimplifyQuery &Q) {
4712   return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
4713 }
4714 
4715 /// See if we can fold the given phi. If not, returns null.
4716 static Value *SimplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
4717                               const SimplifyQuery &Q) {
4718   // WARNING: no matter how worthwhile it may seem, we can not perform PHI CSE
4719   //          here, because the PHI we may succeed simplifying to was not
4720   //          def-reachable from the original PHI!
4721 
4722   // If all of the PHI's incoming values are the same then replace the PHI node
4723   // with the common value.
4724   Value *CommonValue = nullptr;
4725   bool HasUndefInput = false;
4726   for (Value *Incoming : IncomingValues) {
4727     // If the incoming value is the phi node itself, it can safely be skipped.
4728     if (Incoming == PN) continue;
4729     if (Q.isUndefValue(Incoming)) {
4730       // Remember that we saw an undef value, but otherwise ignore them.
4731       HasUndefInput = true;
4732       continue;
4733     }
4734     if (CommonValue && Incoming != CommonValue)
4735       return nullptr;  // Not the same, bail out.
4736     CommonValue = Incoming;
4737   }
4738 
4739   // If CommonValue is null then all of the incoming values were either undef or
4740   // equal to the phi node itself.
4741   if (!CommonValue)
4742     return UndefValue::get(PN->getType());
4743 
4744   // If we have a PHI node like phi(X, undef, X), where X is defined by some
4745   // instruction, we cannot return X as the result of the PHI node unless it
4746   // dominates the PHI block.
4747   if (HasUndefInput)
4748     return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
4749 
4750   return CommonValue;
4751 }
4752 
4753 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
4754                                Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) {
4755   if (auto *C = dyn_cast<Constant>(Op))
4756     return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
4757 
4758   if (auto *CI = dyn_cast<CastInst>(Op)) {
4759     auto *Src = CI->getOperand(0);
4760     Type *SrcTy = Src->getType();
4761     Type *MidTy = CI->getType();
4762     Type *DstTy = Ty;
4763     if (Src->getType() == Ty) {
4764       auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
4765       auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
4766       Type *SrcIntPtrTy =
4767           SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
4768       Type *MidIntPtrTy =
4769           MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
4770       Type *DstIntPtrTy =
4771           DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
4772       if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
4773                                          SrcIntPtrTy, MidIntPtrTy,
4774                                          DstIntPtrTy) == Instruction::BitCast)
4775         return Src;
4776     }
4777   }
4778 
4779   // bitcast x -> x
4780   if (CastOpc == Instruction::BitCast)
4781     if (Op->getType() == Ty)
4782       return Op;
4783 
4784   return nullptr;
4785 }
4786 
4787 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
4788                               const SimplifyQuery &Q) {
4789   return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
4790 }
4791 
4792 /// For the given destination element of a shuffle, peek through shuffles to
4793 /// match a root vector source operand that contains that element in the same
4794 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
4795 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
4796                                    int MaskVal, Value *RootVec,
4797                                    unsigned MaxRecurse) {
4798   if (!MaxRecurse--)
4799     return nullptr;
4800 
4801   // Bail out if any mask value is undefined. That kind of shuffle may be
4802   // simplified further based on demanded bits or other folds.
4803   if (MaskVal == -1)
4804     return nullptr;
4805 
4806   // The mask value chooses which source operand we need to look at next.
4807   int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
4808   int RootElt = MaskVal;
4809   Value *SourceOp = Op0;
4810   if (MaskVal >= InVecNumElts) {
4811     RootElt = MaskVal - InVecNumElts;
4812     SourceOp = Op1;
4813   }
4814 
4815   // If the source operand is a shuffle itself, look through it to find the
4816   // matching root vector.
4817   if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
4818     return foldIdentityShuffles(
4819         DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
4820         SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
4821   }
4822 
4823   // TODO: Look through bitcasts? What if the bitcast changes the vector element
4824   // size?
4825 
4826   // The source operand is not a shuffle. Initialize the root vector value for
4827   // this shuffle if that has not been done yet.
4828   if (!RootVec)
4829     RootVec = SourceOp;
4830 
4831   // Give up as soon as a source operand does not match the existing root value.
4832   if (RootVec != SourceOp)
4833     return nullptr;
4834 
4835   // The element must be coming from the same lane in the source vector
4836   // (although it may have crossed lanes in intermediate shuffles).
4837   if (RootElt != DestElt)
4838     return nullptr;
4839 
4840   return RootVec;
4841 }
4842 
4843 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
4844                                         ArrayRef<int> Mask, Type *RetTy,
4845                                         const SimplifyQuery &Q,
4846                                         unsigned MaxRecurse) {
4847   if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
4848     return UndefValue::get(RetTy);
4849 
4850   auto *InVecTy = cast<VectorType>(Op0->getType());
4851   unsigned MaskNumElts = Mask.size();
4852   ElementCount InVecEltCount = InVecTy->getElementCount();
4853 
4854   bool Scalable = InVecEltCount.isScalable();
4855 
4856   SmallVector<int, 32> Indices;
4857   Indices.assign(Mask.begin(), Mask.end());
4858 
4859   // Canonicalization: If mask does not select elements from an input vector,
4860   // replace that input vector with poison.
4861   if (!Scalable) {
4862     bool MaskSelects0 = false, MaskSelects1 = false;
4863     unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
4864     for (unsigned i = 0; i != MaskNumElts; ++i) {
4865       if (Indices[i] == -1)
4866         continue;
4867       if ((unsigned)Indices[i] < InVecNumElts)
4868         MaskSelects0 = true;
4869       else
4870         MaskSelects1 = true;
4871     }
4872     if (!MaskSelects0)
4873       Op0 = PoisonValue::get(InVecTy);
4874     if (!MaskSelects1)
4875       Op1 = PoisonValue::get(InVecTy);
4876   }
4877 
4878   auto *Op0Const = dyn_cast<Constant>(Op0);
4879   auto *Op1Const = dyn_cast<Constant>(Op1);
4880 
4881   // If all operands are constant, constant fold the shuffle. This
4882   // transformation depends on the value of the mask which is not known at
4883   // compile time for scalable vectors
4884   if (Op0Const && Op1Const)
4885     return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
4886 
4887   // Canonicalization: if only one input vector is constant, it shall be the
4888   // second one. This transformation depends on the value of the mask which
4889   // is not known at compile time for scalable vectors
4890   if (!Scalable && Op0Const && !Op1Const) {
4891     std::swap(Op0, Op1);
4892     ShuffleVectorInst::commuteShuffleMask(Indices,
4893                                           InVecEltCount.getKnownMinValue());
4894   }
4895 
4896   // A splat of an inserted scalar constant becomes a vector constant:
4897   // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
4898   // NOTE: We may have commuted above, so analyze the updated Indices, not the
4899   //       original mask constant.
4900   // NOTE: This transformation depends on the value of the mask which is not
4901   // known at compile time for scalable vectors
4902   Constant *C;
4903   ConstantInt *IndexC;
4904   if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
4905                                           m_ConstantInt(IndexC)))) {
4906     // Match a splat shuffle mask of the insert index allowing undef elements.
4907     int InsertIndex = IndexC->getZExtValue();
4908     if (all_of(Indices, [InsertIndex](int MaskElt) {
4909           return MaskElt == InsertIndex || MaskElt == -1;
4910         })) {
4911       assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
4912 
4913       // Shuffle mask undefs become undefined constant result elements.
4914       SmallVector<Constant *, 16> VecC(MaskNumElts, C);
4915       for (unsigned i = 0; i != MaskNumElts; ++i)
4916         if (Indices[i] == -1)
4917           VecC[i] = UndefValue::get(C->getType());
4918       return ConstantVector::get(VecC);
4919     }
4920   }
4921 
4922   // A shuffle of a splat is always the splat itself. Legal if the shuffle's
4923   // value type is same as the input vectors' type.
4924   if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
4925     if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
4926         is_splat(OpShuf->getShuffleMask()))
4927       return Op0;
4928 
4929   // All remaining transformation depend on the value of the mask, which is
4930   // not known at compile time for scalable vectors.
4931   if (Scalable)
4932     return nullptr;
4933 
4934   // Don't fold a shuffle with undef mask elements. This may get folded in a
4935   // better way using demanded bits or other analysis.
4936   // TODO: Should we allow this?
4937   if (is_contained(Indices, -1))
4938     return nullptr;
4939 
4940   // Check if every element of this shuffle can be mapped back to the
4941   // corresponding element of a single root vector. If so, we don't need this
4942   // shuffle. This handles simple identity shuffles as well as chains of
4943   // shuffles that may widen/narrow and/or move elements across lanes and back.
4944   Value *RootVec = nullptr;
4945   for (unsigned i = 0; i != MaskNumElts; ++i) {
4946     // Note that recursion is limited for each vector element, so if any element
4947     // exceeds the limit, this will fail to simplify.
4948     RootVec =
4949         foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
4950 
4951     // We can't replace a widening/narrowing shuffle with one of its operands.
4952     if (!RootVec || RootVec->getType() != RetTy)
4953       return nullptr;
4954   }
4955   return RootVec;
4956 }
4957 
4958 /// Given operands for a ShuffleVectorInst, fold the result or return null.
4959 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
4960                                        ArrayRef<int> Mask, Type *RetTy,
4961                                        const SimplifyQuery &Q) {
4962   return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
4963 }
4964 
4965 static Constant *foldConstant(Instruction::UnaryOps Opcode,
4966                               Value *&Op, const SimplifyQuery &Q) {
4967   if (auto *C = dyn_cast<Constant>(Op))
4968     return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
4969   return nullptr;
4970 }
4971 
4972 /// Given the operand for an FNeg, see if we can fold the result.  If not, this
4973 /// returns null.
4974 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
4975                                const SimplifyQuery &Q, unsigned MaxRecurse) {
4976   if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
4977     return C;
4978 
4979   Value *X;
4980   // fneg (fneg X) ==> X
4981   if (match(Op, m_FNeg(m_Value(X))))
4982     return X;
4983 
4984   return nullptr;
4985 }
4986 
4987 Value *llvm::SimplifyFNegInst(Value *Op, FastMathFlags FMF,
4988                               const SimplifyQuery &Q) {
4989   return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
4990 }
4991 
4992 static Constant *propagateNaN(Constant *In) {
4993   // If the input is a vector with undef elements, just return a default NaN.
4994   if (!In->isNaN())
4995     return ConstantFP::getNaN(In->getType());
4996 
4997   // Propagate the existing NaN constant when possible.
4998   // TODO: Should we quiet a signaling NaN?
4999   return In;
5000 }
5001 
5002 /// Perform folds that are common to any floating-point operation. This implies
5003 /// transforms based on poison/undef/NaN because the operation itself makes no
5004 /// difference to the result.
5005 static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
5006                               const SimplifyQuery &Q,
5007                               fp::ExceptionBehavior ExBehavior,
5008                               RoundingMode Rounding) {
5009   // Poison is independent of anything else. It always propagates from an
5010   // operand to a math result.
5011   if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
5012     return PoisonValue::get(Ops[0]->getType());
5013 
5014   for (Value *V : Ops) {
5015     bool IsNan = match(V, m_NaN());
5016     bool IsInf = match(V, m_Inf());
5017     bool IsUndef = Q.isUndefValue(V);
5018 
5019     // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
5020     // (an undef operand can be chosen to be Nan/Inf), then the result of
5021     // this operation is poison.
5022     if (FMF.noNaNs() && (IsNan || IsUndef))
5023       return PoisonValue::get(V->getType());
5024     if (FMF.noInfs() && (IsInf || IsUndef))
5025       return PoisonValue::get(V->getType());
5026 
5027     if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5028       if (IsUndef || IsNan)
5029         return propagateNaN(cast<Constant>(V));
5030     } else if (ExBehavior != fp::ebStrict) {
5031       if (IsNan)
5032         return propagateNaN(cast<Constant>(V));
5033     }
5034   }
5035   return nullptr;
5036 }
5037 
5038 /// Given operands for an FAdd, see if we can fold the result.  If not, this
5039 /// returns null.
5040 static Value *
5041 SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5042                  const SimplifyQuery &Q, unsigned MaxRecurse,
5043                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5044                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5045   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5046     if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5047       return C;
5048 
5049   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5050     return C;
5051 
5052   // fadd X, -0 ==> X
5053   // With strict/constrained FP, we have these possible edge cases that do
5054   // not simplify to Op0:
5055   // fadd SNaN, -0.0 --> QNaN
5056   // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
5057   if (canIgnoreSNaN(ExBehavior, FMF) &&
5058       (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5059        FMF.noSignedZeros()))
5060     if (match(Op1, m_NegZeroFP()))
5061       return Op0;
5062 
5063   // fadd X, 0 ==> X, when we know X is not -0
5064   if (canIgnoreSNaN(ExBehavior, FMF))
5065     if (match(Op1, m_PosZeroFP()) &&
5066         (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
5067       return Op0;
5068 
5069   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5070     return nullptr;
5071 
5072   // With nnan: -X + X --> 0.0 (and commuted variant)
5073   // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5074   // Negative zeros are allowed because we always end up with positive zero:
5075   // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5076   // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5077   // X =  0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5078   // X =  0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5079   if (FMF.noNaNs()) {
5080     if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
5081         match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
5082       return ConstantFP::getNullValue(Op0->getType());
5083 
5084     if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5085         match(Op1, m_FNeg(m_Specific(Op0))))
5086       return ConstantFP::getNullValue(Op0->getType());
5087   }
5088 
5089   // (X - Y) + Y --> X
5090   // Y + (X - Y) --> X
5091   Value *X;
5092   if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5093       (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
5094        match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
5095     return X;
5096 
5097   return nullptr;
5098 }
5099 
5100 /// Given operands for an FSub, see if we can fold the result.  If not, this
5101 /// returns null.
5102 static Value *
5103 SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5104                  const SimplifyQuery &Q, unsigned MaxRecurse,
5105                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5106                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5107   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5108     if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5109       return C;
5110 
5111   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5112     return C;
5113 
5114   // fsub X, +0 ==> X
5115   if (canIgnoreSNaN(ExBehavior, FMF) &&
5116       (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5117        FMF.noSignedZeros()))
5118     if (match(Op1, m_PosZeroFP()))
5119       return Op0;
5120 
5121   // fsub X, -0 ==> X, when we know X is not -0
5122   if (canIgnoreSNaN(ExBehavior, FMF))
5123     if (match(Op1, m_NegZeroFP()) &&
5124         (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
5125       return Op0;
5126 
5127   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5128     return nullptr;
5129 
5130   // fsub -0.0, (fsub -0.0, X) ==> X
5131   // fsub -0.0, (fneg X) ==> X
5132   Value *X;
5133   if (match(Op0, m_NegZeroFP()) &&
5134       match(Op1, m_FNeg(m_Value(X))))
5135     return X;
5136 
5137   // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
5138   // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
5139   if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
5140       (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
5141        match(Op1, m_FNeg(m_Value(X)))))
5142     return X;
5143 
5144   // fsub nnan x, x ==> 0.0
5145   if (FMF.noNaNs() && Op0 == Op1)
5146     return Constant::getNullValue(Op0->getType());
5147 
5148   // Y - (Y - X) --> X
5149   // (X + Y) - Y --> X
5150   if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5151       (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
5152        match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
5153     return X;
5154 
5155   return nullptr;
5156 }
5157 
5158 static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5159                               const SimplifyQuery &Q, unsigned MaxRecurse,
5160                               fp::ExceptionBehavior ExBehavior,
5161                               RoundingMode Rounding) {
5162   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5163     return C;
5164 
5165   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5166     return nullptr;
5167 
5168   // fmul X, 1.0 ==> X
5169   if (match(Op1, m_FPOne()))
5170     return Op0;
5171 
5172   // fmul 1.0, X ==> X
5173   if (match(Op0, m_FPOne()))
5174     return Op1;
5175 
5176   // fmul nnan nsz X, 0 ==> 0
5177   if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP()))
5178     return ConstantFP::getNullValue(Op0->getType());
5179 
5180   // fmul nnan nsz 0, X ==> 0
5181   if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
5182     return ConstantFP::getNullValue(Op1->getType());
5183 
5184   // sqrt(X) * sqrt(X) --> X, if we can:
5185   // 1. Remove the intermediate rounding (reassociate).
5186   // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
5187   // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
5188   Value *X;
5189   if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) &&
5190       FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros())
5191     return X;
5192 
5193   return nullptr;
5194 }
5195 
5196 /// Given the operands for an FMul, see if we can fold the result
5197 static Value *
5198 SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5199                  const SimplifyQuery &Q, unsigned MaxRecurse,
5200                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5201                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5202   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5203     if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
5204       return C;
5205 
5206   // Now apply simplifications that do not require rounding.
5207   return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5208 }
5209 
5210 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5211                               const SimplifyQuery &Q,
5212                               fp::ExceptionBehavior ExBehavior,
5213                               RoundingMode Rounding) {
5214   return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5215                             Rounding);
5216 }
5217 
5218 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5219                               const SimplifyQuery &Q,
5220                               fp::ExceptionBehavior ExBehavior,
5221                               RoundingMode Rounding) {
5222   return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5223                             Rounding);
5224 }
5225 
5226 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5227                               const SimplifyQuery &Q,
5228                               fp::ExceptionBehavior ExBehavior,
5229                               RoundingMode Rounding) {
5230   return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5231                             Rounding);
5232 }
5233 
5234 Value *llvm::SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5235                              const SimplifyQuery &Q,
5236                              fp::ExceptionBehavior ExBehavior,
5237                              RoundingMode Rounding) {
5238   return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5239                            Rounding);
5240 }
5241 
5242 static Value *
5243 SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5244                  const SimplifyQuery &Q, unsigned,
5245                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5246                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5247   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5248     if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
5249       return C;
5250 
5251   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5252     return C;
5253 
5254   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5255     return nullptr;
5256 
5257   // X / 1.0 -> X
5258   if (match(Op1, m_FPOne()))
5259     return Op0;
5260 
5261   // 0 / X -> 0
5262   // Requires that NaNs are off (X could be zero) and signed zeroes are
5263   // ignored (X could be positive or negative, so the output sign is unknown).
5264   if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
5265     return ConstantFP::getNullValue(Op0->getType());
5266 
5267   if (FMF.noNaNs()) {
5268     // X / X -> 1.0 is legal when NaNs are ignored.
5269     // We can ignore infinities because INF/INF is NaN.
5270     if (Op0 == Op1)
5271       return ConstantFP::get(Op0->getType(), 1.0);
5272 
5273     // (X * Y) / Y --> X if we can reassociate to the above form.
5274     Value *X;
5275     if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
5276       return X;
5277 
5278     // -X /  X -> -1.0 and
5279     //  X / -X -> -1.0 are legal when NaNs are ignored.
5280     // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
5281     if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
5282         match(Op1, m_FNegNSZ(m_Specific(Op0))))
5283       return ConstantFP::get(Op0->getType(), -1.0);
5284   }
5285 
5286   return nullptr;
5287 }
5288 
5289 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5290                               const SimplifyQuery &Q,
5291                               fp::ExceptionBehavior ExBehavior,
5292                               RoundingMode Rounding) {
5293   return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5294                             Rounding);
5295 }
5296 
5297 static Value *
5298 SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5299                  const SimplifyQuery &Q, unsigned,
5300                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5301                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5302   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5303     if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
5304       return C;
5305 
5306   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5307     return C;
5308 
5309   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5310     return nullptr;
5311 
5312   // Unlike fdiv, the result of frem always matches the sign of the dividend.
5313   // The constant match may include undef elements in a vector, so return a full
5314   // zero constant as the result.
5315   if (FMF.noNaNs()) {
5316     // +0 % X -> 0
5317     if (match(Op0, m_PosZeroFP()))
5318       return ConstantFP::getNullValue(Op0->getType());
5319     // -0 % X -> -0
5320     if (match(Op0, m_NegZeroFP()))
5321       return ConstantFP::getNegativeZero(Op0->getType());
5322   }
5323 
5324   return nullptr;
5325 }
5326 
5327 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5328                               const SimplifyQuery &Q,
5329                               fp::ExceptionBehavior ExBehavior,
5330                               RoundingMode Rounding) {
5331   return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5332                             Rounding);
5333 }
5334 
5335 //=== Helper functions for higher up the class hierarchy.
5336 
5337 /// Given the operand for a UnaryOperator, see if we can fold the result.
5338 /// If not, this returns null.
5339 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
5340                            unsigned MaxRecurse) {
5341   switch (Opcode) {
5342   case Instruction::FNeg:
5343     return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
5344   default:
5345     llvm_unreachable("Unexpected opcode");
5346   }
5347 }
5348 
5349 /// Given the operand for a UnaryOperator, see if we can fold the result.
5350 /// If not, this returns null.
5351 /// Try to use FastMathFlags when folding the result.
5352 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
5353                              const FastMathFlags &FMF,
5354                              const SimplifyQuery &Q, unsigned MaxRecurse) {
5355   switch (Opcode) {
5356   case Instruction::FNeg:
5357     return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
5358   default:
5359     return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
5360   }
5361 }
5362 
5363 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
5364   return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
5365 }
5366 
5367 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
5368                           const SimplifyQuery &Q) {
5369   return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
5370 }
5371 
5372 /// Given operands for a BinaryOperator, see if we can fold the result.
5373 /// If not, this returns null.
5374 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5375                             const SimplifyQuery &Q, unsigned MaxRecurse) {
5376   switch (Opcode) {
5377   case Instruction::Add:
5378     return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
5379   case Instruction::Sub:
5380     return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
5381   case Instruction::Mul:
5382     return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
5383   case Instruction::SDiv:
5384     return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
5385   case Instruction::UDiv:
5386     return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
5387   case Instruction::SRem:
5388     return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
5389   case Instruction::URem:
5390     return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
5391   case Instruction::Shl:
5392     return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
5393   case Instruction::LShr:
5394     return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
5395   case Instruction::AShr:
5396     return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
5397   case Instruction::And:
5398     return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
5399   case Instruction::Or:
5400     return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
5401   case Instruction::Xor:
5402     return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
5403   case Instruction::FAdd:
5404     return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5405   case Instruction::FSub:
5406     return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5407   case Instruction::FMul:
5408     return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5409   case Instruction::FDiv:
5410     return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5411   case Instruction::FRem:
5412     return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5413   default:
5414     llvm_unreachable("Unexpected opcode");
5415   }
5416 }
5417 
5418 /// Given operands for a BinaryOperator, see if we can fold the result.
5419 /// If not, this returns null.
5420 /// Try to use FastMathFlags when folding the result.
5421 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5422                             const FastMathFlags &FMF, const SimplifyQuery &Q,
5423                             unsigned MaxRecurse) {
5424   switch (Opcode) {
5425   case Instruction::FAdd:
5426     return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
5427   case Instruction::FSub:
5428     return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
5429   case Instruction::FMul:
5430     return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
5431   case Instruction::FDiv:
5432     return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
5433   default:
5434     return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
5435   }
5436 }
5437 
5438 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5439                            const SimplifyQuery &Q) {
5440   return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
5441 }
5442 
5443 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5444                            FastMathFlags FMF, const SimplifyQuery &Q) {
5445   return ::SimplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
5446 }
5447 
5448 /// Given operands for a CmpInst, see if we can fold the result.
5449 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5450                               const SimplifyQuery &Q, unsigned MaxRecurse) {
5451   if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
5452     return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
5453   return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5454 }
5455 
5456 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5457                              const SimplifyQuery &Q) {
5458   return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
5459 }
5460 
5461 static bool IsIdempotent(Intrinsic::ID ID) {
5462   switch (ID) {
5463   default: return false;
5464 
5465   // Unary idempotent: f(f(x)) = f(x)
5466   case Intrinsic::fabs:
5467   case Intrinsic::floor:
5468   case Intrinsic::ceil:
5469   case Intrinsic::trunc:
5470   case Intrinsic::rint:
5471   case Intrinsic::nearbyint:
5472   case Intrinsic::round:
5473   case Intrinsic::roundeven:
5474   case Intrinsic::canonicalize:
5475     return true;
5476   }
5477 }
5478 
5479 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
5480                                    const DataLayout &DL) {
5481   GlobalValue *PtrSym;
5482   APInt PtrOffset;
5483   if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
5484     return nullptr;
5485 
5486   Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
5487   Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
5488   Type *Int32PtrTy = Int32Ty->getPointerTo();
5489   Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
5490 
5491   auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
5492   if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
5493     return nullptr;
5494 
5495   uint64_t OffsetInt = OffsetConstInt->getSExtValue();
5496   if (OffsetInt % 4 != 0)
5497     return nullptr;
5498 
5499   Constant *C = ConstantExpr::getGetElementPtr(
5500       Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
5501       ConstantInt::get(Int64Ty, OffsetInt / 4));
5502   Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
5503   if (!Loaded)
5504     return nullptr;
5505 
5506   auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
5507   if (!LoadedCE)
5508     return nullptr;
5509 
5510   if (LoadedCE->getOpcode() == Instruction::Trunc) {
5511     LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
5512     if (!LoadedCE)
5513       return nullptr;
5514   }
5515 
5516   if (LoadedCE->getOpcode() != Instruction::Sub)
5517     return nullptr;
5518 
5519   auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
5520   if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
5521     return nullptr;
5522   auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
5523 
5524   Constant *LoadedRHS = LoadedCE->getOperand(1);
5525   GlobalValue *LoadedRHSSym;
5526   APInt LoadedRHSOffset;
5527   if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
5528                                   DL) ||
5529       PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
5530     return nullptr;
5531 
5532   return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
5533 }
5534 
5535 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
5536                                      const SimplifyQuery &Q) {
5537   // Idempotent functions return the same result when called repeatedly.
5538   Intrinsic::ID IID = F->getIntrinsicID();
5539   if (IsIdempotent(IID))
5540     if (auto *II = dyn_cast<IntrinsicInst>(Op0))
5541       if (II->getIntrinsicID() == IID)
5542         return II;
5543 
5544   Value *X;
5545   switch (IID) {
5546   case Intrinsic::fabs:
5547     if (SignBitMustBeZero(Op0, Q.TLI)) return Op0;
5548     break;
5549   case Intrinsic::bswap:
5550     // bswap(bswap(x)) -> x
5551     if (match(Op0, m_BSwap(m_Value(X)))) return X;
5552     break;
5553   case Intrinsic::bitreverse:
5554     // bitreverse(bitreverse(x)) -> x
5555     if (match(Op0, m_BitReverse(m_Value(X)))) return X;
5556     break;
5557   case Intrinsic::ctpop: {
5558     // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
5559     // ctpop(and X, 1) --> and X, 1
5560     unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
5561     if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
5562                           Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
5563       return Op0;
5564     break;
5565   }
5566   case Intrinsic::exp:
5567     // exp(log(x)) -> x
5568     if (Q.CxtI->hasAllowReassoc() &&
5569         match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X)))) return X;
5570     break;
5571   case Intrinsic::exp2:
5572     // exp2(log2(x)) -> x
5573     if (Q.CxtI->hasAllowReassoc() &&
5574         match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X)))) return X;
5575     break;
5576   case Intrinsic::log:
5577     // log(exp(x)) -> x
5578     if (Q.CxtI->hasAllowReassoc() &&
5579         match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X)))) return X;
5580     break;
5581   case Intrinsic::log2:
5582     // log2(exp2(x)) -> x
5583     if (Q.CxtI->hasAllowReassoc() &&
5584         (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
5585          match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0),
5586                                                 m_Value(X))))) return X;
5587     break;
5588   case Intrinsic::log10:
5589     // log10(pow(10.0, x)) -> x
5590     if (Q.CxtI->hasAllowReassoc() &&
5591         match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0),
5592                                                m_Value(X)))) return X;
5593     break;
5594   case Intrinsic::floor:
5595   case Intrinsic::trunc:
5596   case Intrinsic::ceil:
5597   case Intrinsic::round:
5598   case Intrinsic::roundeven:
5599   case Intrinsic::nearbyint:
5600   case Intrinsic::rint: {
5601     // floor (sitofp x) -> sitofp x
5602     // floor (uitofp x) -> uitofp x
5603     //
5604     // Converting from int always results in a finite integral number or
5605     // infinity. For either of those inputs, these rounding functions always
5606     // return the same value, so the rounding can be eliminated.
5607     if (match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
5608       return Op0;
5609     break;
5610   }
5611   case Intrinsic::experimental_vector_reverse:
5612     // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
5613     if (match(Op0,
5614               m_Intrinsic<Intrinsic::experimental_vector_reverse>(m_Value(X))))
5615       return X;
5616     // experimental.vector.reverse(splat(X)) -> splat(X)
5617     if (isSplatValue(Op0))
5618       return Op0;
5619     break;
5620   default:
5621     break;
5622   }
5623 
5624   return nullptr;
5625 }
5626 
5627 /// Given a min/max intrinsic, see if it can be removed based on having an
5628 /// operand that is another min/max intrinsic with shared operand(s). The caller
5629 /// is expected to swap the operand arguments to handle commutation.
5630 static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
5631   Value *X, *Y;
5632   if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
5633     return nullptr;
5634 
5635   auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
5636   if (!MM0)
5637     return nullptr;
5638   Intrinsic::ID IID0 = MM0->getIntrinsicID();
5639 
5640   if (Op1 == X || Op1 == Y ||
5641       match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
5642     // max (max X, Y), X --> max X, Y
5643     if (IID0 == IID)
5644       return MM0;
5645     // max (min X, Y), X --> X
5646     if (IID0 == getInverseMinMaxIntrinsic(IID))
5647       return Op1;
5648   }
5649   return nullptr;
5650 }
5651 
5652 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
5653                                       const SimplifyQuery &Q) {
5654   Intrinsic::ID IID = F->getIntrinsicID();
5655   Type *ReturnType = F->getReturnType();
5656   unsigned BitWidth = ReturnType->getScalarSizeInBits();
5657   switch (IID) {
5658   case Intrinsic::abs:
5659     // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
5660     // It is always ok to pick the earlier abs. We'll just lose nsw if its only
5661     // on the outer abs.
5662     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
5663       return Op0;
5664     break;
5665 
5666   case Intrinsic::cttz: {
5667     Value *X;
5668     if (match(Op0, m_Shl(m_One(), m_Value(X))))
5669       return X;
5670     break;
5671   }
5672   case Intrinsic::ctlz: {
5673     Value *X;
5674     if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
5675       return X;
5676     if (match(Op0, m_AShr(m_Negative(), m_Value())))
5677       return Constant::getNullValue(ReturnType);
5678     break;
5679   }
5680   case Intrinsic::smax:
5681   case Intrinsic::smin:
5682   case Intrinsic::umax:
5683   case Intrinsic::umin: {
5684     // If the arguments are the same, this is a no-op.
5685     if (Op0 == Op1)
5686       return Op0;
5687 
5688     // Canonicalize constant operand as Op1.
5689     if (isa<Constant>(Op0))
5690       std::swap(Op0, Op1);
5691 
5692     // Assume undef is the limit value.
5693     if (Q.isUndefValue(Op1))
5694       return ConstantInt::get(
5695           ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
5696 
5697     const APInt *C;
5698     if (match(Op1, m_APIntAllowUndef(C))) {
5699       // Clamp to limit value. For example:
5700       // umax(i8 %x, i8 255) --> 255
5701       if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
5702         return ConstantInt::get(ReturnType, *C);
5703 
5704       // If the constant op is the opposite of the limit value, the other must
5705       // be larger/smaller or equal. For example:
5706       // umin(i8 %x, i8 255) --> %x
5707       if (*C == MinMaxIntrinsic::getSaturationPoint(
5708                     getInverseMinMaxIntrinsic(IID), BitWidth))
5709         return Op0;
5710 
5711       // Remove nested call if constant operands allow it. Example:
5712       // max (max X, 7), 5 -> max X, 7
5713       auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
5714       if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
5715         // TODO: loosen undef/splat restrictions for vector constants.
5716         Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
5717         const APInt *InnerC;
5718         if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
5719             ICmpInst::compare(*InnerC, *C,
5720                               ICmpInst::getNonStrictPredicate(
5721                                   MinMaxIntrinsic::getPredicate(IID))))
5722           return Op0;
5723       }
5724     }
5725 
5726     if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
5727       return V;
5728     if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
5729       return V;
5730 
5731     ICmpInst::Predicate Pred =
5732         ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
5733     if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
5734       return Op0;
5735     if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
5736       return Op1;
5737 
5738     if (Optional<bool> Imp =
5739             isImpliedByDomCondition(Pred, Op0, Op1, Q.CxtI, Q.DL))
5740       return *Imp ? Op0 : Op1;
5741     if (Optional<bool> Imp =
5742             isImpliedByDomCondition(Pred, Op1, Op0, Q.CxtI, Q.DL))
5743       return *Imp ? Op1 : Op0;
5744 
5745     break;
5746   }
5747   case Intrinsic::usub_with_overflow:
5748   case Intrinsic::ssub_with_overflow:
5749     // X - X -> { 0, false }
5750     // X - undef -> { 0, false }
5751     // undef - X -> { 0, false }
5752     if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5753       return Constant::getNullValue(ReturnType);
5754     break;
5755   case Intrinsic::uadd_with_overflow:
5756   case Intrinsic::sadd_with_overflow:
5757     // X + undef -> { -1, false }
5758     // undef + x -> { -1, false }
5759     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
5760       return ConstantStruct::get(
5761           cast<StructType>(ReturnType),
5762           {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
5763            Constant::getNullValue(ReturnType->getStructElementType(1))});
5764     }
5765     break;
5766   case Intrinsic::umul_with_overflow:
5767   case Intrinsic::smul_with_overflow:
5768     // 0 * X -> { 0, false }
5769     // X * 0 -> { 0, false }
5770     if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
5771       return Constant::getNullValue(ReturnType);
5772     // undef * X -> { 0, false }
5773     // X * undef -> { 0, false }
5774     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5775       return Constant::getNullValue(ReturnType);
5776     break;
5777   case Intrinsic::uadd_sat:
5778     // sat(MAX + X) -> MAX
5779     // sat(X + MAX) -> MAX
5780     if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
5781       return Constant::getAllOnesValue(ReturnType);
5782     LLVM_FALLTHROUGH;
5783   case Intrinsic::sadd_sat:
5784     // sat(X + undef) -> -1
5785     // sat(undef + X) -> -1
5786     // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
5787     // For signed: Assume undef is ~X, in which case X + ~X = -1.
5788     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5789       return Constant::getAllOnesValue(ReturnType);
5790 
5791     // X + 0 -> X
5792     if (match(Op1, m_Zero()))
5793       return Op0;
5794     // 0 + X -> X
5795     if (match(Op0, m_Zero()))
5796       return Op1;
5797     break;
5798   case Intrinsic::usub_sat:
5799     // sat(0 - X) -> 0, sat(X - MAX) -> 0
5800     if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
5801       return Constant::getNullValue(ReturnType);
5802     LLVM_FALLTHROUGH;
5803   case Intrinsic::ssub_sat:
5804     // X - X -> 0, X - undef -> 0, undef - X -> 0
5805     if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5806       return Constant::getNullValue(ReturnType);
5807     // X - 0 -> X
5808     if (match(Op1, m_Zero()))
5809       return Op0;
5810     break;
5811   case Intrinsic::load_relative:
5812     if (auto *C0 = dyn_cast<Constant>(Op0))
5813       if (auto *C1 = dyn_cast<Constant>(Op1))
5814         return SimplifyRelativeLoad(C0, C1, Q.DL);
5815     break;
5816   case Intrinsic::powi:
5817     if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
5818       // powi(x, 0) -> 1.0
5819       if (Power->isZero())
5820         return ConstantFP::get(Op0->getType(), 1.0);
5821       // powi(x, 1) -> x
5822       if (Power->isOne())
5823         return Op0;
5824     }
5825     break;
5826   case Intrinsic::copysign:
5827     // copysign X, X --> X
5828     if (Op0 == Op1)
5829       return Op0;
5830     // copysign -X, X --> X
5831     // copysign X, -X --> -X
5832     if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5833         match(Op1, m_FNeg(m_Specific(Op0))))
5834       return Op1;
5835     break;
5836   case Intrinsic::maxnum:
5837   case Intrinsic::minnum:
5838   case Intrinsic::maximum:
5839   case Intrinsic::minimum: {
5840     // If the arguments are the same, this is a no-op.
5841     if (Op0 == Op1) return Op0;
5842 
5843     // Canonicalize constant operand as Op1.
5844     if (isa<Constant>(Op0))
5845       std::swap(Op0, Op1);
5846 
5847     // If an argument is undef, return the other argument.
5848     if (Q.isUndefValue(Op1))
5849       return Op0;
5850 
5851     bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
5852     bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
5853 
5854     // minnum(X, nan) -> X
5855     // maxnum(X, nan) -> X
5856     // minimum(X, nan) -> nan
5857     // maximum(X, nan) -> nan
5858     if (match(Op1, m_NaN()))
5859       return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
5860 
5861     // In the following folds, inf can be replaced with the largest finite
5862     // float, if the ninf flag is set.
5863     const APFloat *C;
5864     if (match(Op1, m_APFloat(C)) &&
5865         (C->isInfinity() || (Q.CxtI->hasNoInfs() && C->isLargest()))) {
5866       // minnum(X, -inf) -> -inf
5867       // maxnum(X, +inf) -> +inf
5868       // minimum(X, -inf) -> -inf if nnan
5869       // maximum(X, +inf) -> +inf if nnan
5870       if (C->isNegative() == IsMin && (!PropagateNaN || Q.CxtI->hasNoNaNs()))
5871         return ConstantFP::get(ReturnType, *C);
5872 
5873       // minnum(X, +inf) -> X if nnan
5874       // maxnum(X, -inf) -> X if nnan
5875       // minimum(X, +inf) -> X
5876       // maximum(X, -inf) -> X
5877       if (C->isNegative() != IsMin && (PropagateNaN || Q.CxtI->hasNoNaNs()))
5878         return Op0;
5879     }
5880 
5881     // Min/max of the same operation with common operand:
5882     // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
5883     if (auto *M0 = dyn_cast<IntrinsicInst>(Op0))
5884       if (M0->getIntrinsicID() == IID &&
5885           (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1))
5886         return Op0;
5887     if (auto *M1 = dyn_cast<IntrinsicInst>(Op1))
5888       if (M1->getIntrinsicID() == IID &&
5889           (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0))
5890         return Op1;
5891 
5892     break;
5893   }
5894   case Intrinsic::experimental_vector_extract: {
5895     Type *ReturnType = F->getReturnType();
5896 
5897     // (extract_vector (insert_vector _, X, 0), 0) -> X
5898     unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
5899     Value *X = nullptr;
5900     if (match(Op0, m_Intrinsic<Intrinsic::experimental_vector_insert>(
5901                        m_Value(), m_Value(X), m_Zero())) &&
5902         IdxN == 0 && X->getType() == ReturnType)
5903       return X;
5904 
5905     break;
5906   }
5907   default:
5908     break;
5909   }
5910 
5911   return nullptr;
5912 }
5913 
5914 static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
5915 
5916   unsigned NumOperands = Call->arg_size();
5917   Function *F = cast<Function>(Call->getCalledFunction());
5918   Intrinsic::ID IID = F->getIntrinsicID();
5919 
5920   // Most of the intrinsics with no operands have some kind of side effect.
5921   // Don't simplify.
5922   if (!NumOperands) {
5923     switch (IID) {
5924     case Intrinsic::vscale: {
5925       // Call may not be inserted into the IR yet at point of calling simplify.
5926       if (!Call->getParent() || !Call->getParent()->getParent())
5927         return nullptr;
5928       auto Attr = Call->getFunction()->getFnAttribute(Attribute::VScaleRange);
5929       if (!Attr.isValid())
5930         return nullptr;
5931       unsigned VScaleMin = Attr.getVScaleRangeMin();
5932       Optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
5933       if (VScaleMax && VScaleMin == VScaleMax)
5934         return ConstantInt::get(F->getReturnType(), VScaleMin);
5935       return nullptr;
5936     }
5937     default:
5938       return nullptr;
5939     }
5940   }
5941 
5942   if (NumOperands == 1)
5943     return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q);
5944 
5945   if (NumOperands == 2)
5946     return simplifyBinaryIntrinsic(F, Call->getArgOperand(0),
5947                                    Call->getArgOperand(1), Q);
5948 
5949   // Handle intrinsics with 3 or more arguments.
5950   switch (IID) {
5951   case Intrinsic::masked_load:
5952   case Intrinsic::masked_gather: {
5953     Value *MaskArg = Call->getArgOperand(2);
5954     Value *PassthruArg = Call->getArgOperand(3);
5955     // If the mask is all zeros or undef, the "passthru" argument is the result.
5956     if (maskIsAllZeroOrUndef(MaskArg))
5957       return PassthruArg;
5958     return nullptr;
5959   }
5960   case Intrinsic::fshl:
5961   case Intrinsic::fshr: {
5962     Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1),
5963           *ShAmtArg = Call->getArgOperand(2);
5964 
5965     // If both operands are undef, the result is undef.
5966     if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
5967       return UndefValue::get(F->getReturnType());
5968 
5969     // If shift amount is undef, assume it is zero.
5970     if (Q.isUndefValue(ShAmtArg))
5971       return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
5972 
5973     const APInt *ShAmtC;
5974     if (match(ShAmtArg, m_APInt(ShAmtC))) {
5975       // If there's effectively no shift, return the 1st arg or 2nd arg.
5976       APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
5977       if (ShAmtC->urem(BitWidth).isZero())
5978         return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
5979     }
5980 
5981     // Rotating zero by anything is zero.
5982     if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
5983       return ConstantInt::getNullValue(F->getReturnType());
5984 
5985     // Rotating -1 by anything is -1.
5986     if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
5987       return ConstantInt::getAllOnesValue(F->getReturnType());
5988 
5989     return nullptr;
5990   }
5991   case Intrinsic::experimental_constrained_fma: {
5992     Value *Op0 = Call->getArgOperand(0);
5993     Value *Op1 = Call->getArgOperand(1);
5994     Value *Op2 = Call->getArgOperand(2);
5995     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
5996     if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q,
5997                                 FPI->getExceptionBehavior().getValue(),
5998                                 FPI->getRoundingMode().getValue()))
5999       return V;
6000     return nullptr;
6001   }
6002   case Intrinsic::fma:
6003   case Intrinsic::fmuladd: {
6004     Value *Op0 = Call->getArgOperand(0);
6005     Value *Op1 = Call->getArgOperand(1);
6006     Value *Op2 = Call->getArgOperand(2);
6007     if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q, fp::ebIgnore,
6008                                 RoundingMode::NearestTiesToEven))
6009       return V;
6010     return nullptr;
6011   }
6012   case Intrinsic::smul_fix:
6013   case Intrinsic::smul_fix_sat: {
6014     Value *Op0 = Call->getArgOperand(0);
6015     Value *Op1 = Call->getArgOperand(1);
6016     Value *Op2 = Call->getArgOperand(2);
6017     Type *ReturnType = F->getReturnType();
6018 
6019     // Canonicalize constant operand as Op1 (ConstantFolding handles the case
6020     // when both Op0 and Op1 are constant so we do not care about that special
6021     // case here).
6022     if (isa<Constant>(Op0))
6023       std::swap(Op0, Op1);
6024 
6025     // X * 0 -> 0
6026     if (match(Op1, m_Zero()))
6027       return Constant::getNullValue(ReturnType);
6028 
6029     // X * undef -> 0
6030     if (Q.isUndefValue(Op1))
6031       return Constant::getNullValue(ReturnType);
6032 
6033     // X * (1 << Scale) -> X
6034     APInt ScaledOne =
6035         APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
6036                             cast<ConstantInt>(Op2)->getZExtValue());
6037     if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
6038       return Op0;
6039 
6040     return nullptr;
6041   }
6042   case Intrinsic::experimental_vector_insert: {
6043     Value *Vec = Call->getArgOperand(0);
6044     Value *SubVec = Call->getArgOperand(1);
6045     Value *Idx = Call->getArgOperand(2);
6046     Type *ReturnType = F->getReturnType();
6047 
6048     // (insert_vector Y, (extract_vector X, 0), 0) -> X
6049     // where: Y is X, or Y is undef
6050     unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6051     Value *X = nullptr;
6052     if (match(SubVec, m_Intrinsic<Intrinsic::experimental_vector_extract>(
6053                           m_Value(X), m_Zero())) &&
6054         (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
6055         X->getType() == ReturnType)
6056       return X;
6057 
6058     return nullptr;
6059   }
6060   case Intrinsic::experimental_constrained_fadd: {
6061     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6062     return SimplifyFAddInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
6063                             FPI->getFastMathFlags(), Q,
6064                             FPI->getExceptionBehavior().getValue(),
6065                             FPI->getRoundingMode().getValue());
6066     break;
6067   }
6068   case Intrinsic::experimental_constrained_fsub: {
6069     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6070     return SimplifyFSubInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
6071                             FPI->getFastMathFlags(), Q,
6072                             FPI->getExceptionBehavior().getValue(),
6073                             FPI->getRoundingMode().getValue());
6074     break;
6075   }
6076   case Intrinsic::experimental_constrained_fmul: {
6077     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6078     return SimplifyFMulInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
6079                             FPI->getFastMathFlags(), Q,
6080                             FPI->getExceptionBehavior().getValue(),
6081                             FPI->getRoundingMode().getValue());
6082     break;
6083   }
6084   case Intrinsic::experimental_constrained_fdiv: {
6085     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6086     return SimplifyFDivInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
6087                             FPI->getFastMathFlags(), Q,
6088                             FPI->getExceptionBehavior().getValue(),
6089                             FPI->getRoundingMode().getValue());
6090     break;
6091   }
6092   case Intrinsic::experimental_constrained_frem: {
6093     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6094     return SimplifyFRemInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
6095                             FPI->getFastMathFlags(), Q,
6096                             FPI->getExceptionBehavior().getValue(),
6097                             FPI->getRoundingMode().getValue());
6098     break;
6099   }
6100   default:
6101     return nullptr;
6102   }
6103 }
6104 
6105 static Value *tryConstantFoldCall(CallBase *Call, const SimplifyQuery &Q) {
6106   auto *F = dyn_cast<Function>(Call->getCalledOperand());
6107   if (!F || !canConstantFoldCallTo(Call, F))
6108     return nullptr;
6109 
6110   SmallVector<Constant *, 4> ConstantArgs;
6111   unsigned NumArgs = Call->arg_size();
6112   ConstantArgs.reserve(NumArgs);
6113   for (auto &Arg : Call->args()) {
6114     Constant *C = dyn_cast<Constant>(&Arg);
6115     if (!C) {
6116       if (isa<MetadataAsValue>(Arg.get()))
6117         continue;
6118       return nullptr;
6119     }
6120     ConstantArgs.push_back(C);
6121   }
6122 
6123   return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
6124 }
6125 
6126 Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) {
6127   // musttail calls can only be simplified if they are also DCEd.
6128   // As we can't guarantee this here, don't simplify them.
6129   if (Call->isMustTailCall())
6130     return nullptr;
6131 
6132   // call undef -> poison
6133   // call null -> poison
6134   Value *Callee = Call->getCalledOperand();
6135   if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
6136     return PoisonValue::get(Call->getType());
6137 
6138   if (Value *V = tryConstantFoldCall(Call, Q))
6139     return V;
6140 
6141   auto *F = dyn_cast<Function>(Callee);
6142   if (F && F->isIntrinsic())
6143     if (Value *Ret = simplifyIntrinsic(Call, Q))
6144       return Ret;
6145 
6146   return nullptr;
6147 }
6148 
6149 /// Given operands for a Freeze, see if we can fold the result.
6150 static Value *SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6151   // Use a utility function defined in ValueTracking.
6152   if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
6153     return Op0;
6154   // We have room for improvement.
6155   return nullptr;
6156 }
6157 
6158 Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6159   return ::SimplifyFreezeInst(Op0, Q);
6160 }
6161 
6162 static Value *SimplifyLoadInst(LoadInst *LI, Value *PtrOp,
6163                                const SimplifyQuery &Q) {
6164   if (LI->isVolatile())
6165     return nullptr;
6166 
6167   APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
6168   auto *PtrOpC = dyn_cast<Constant>(PtrOp);
6169   // Try to convert operand into a constant by stripping offsets while looking
6170   // through invariant.group intrinsics. Don't bother if the underlying object
6171   // is not constant, as calculating GEP offsets is expensive.
6172   if (!PtrOpC && isa<Constant>(getUnderlyingObject(PtrOp))) {
6173     PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
6174         Q.DL, Offset, /* AllowNonInbounts */ true,
6175         /* AllowInvariantGroup */ true);
6176     // Index size may have changed due to address space casts.
6177     Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
6178     PtrOpC = dyn_cast<Constant>(PtrOp);
6179   }
6180 
6181   if (PtrOpC)
6182     return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Offset, Q.DL);
6183   return nullptr;
6184 }
6185 
6186 /// See if we can compute a simplified version of this instruction.
6187 /// If not, this returns null.
6188 
6189 static Value *simplifyInstructionWithOperands(Instruction *I,
6190                                               ArrayRef<Value *> NewOps,
6191                                               const SimplifyQuery &SQ,
6192                                               OptimizationRemarkEmitter *ORE) {
6193   const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
6194   Value *Result = nullptr;
6195 
6196   switch (I->getOpcode()) {
6197   default:
6198     if (llvm::all_of(NewOps, [](Value *V) { return isa<Constant>(V); })) {
6199       SmallVector<Constant *, 8> NewConstOps(NewOps.size());
6200       transform(NewOps, NewConstOps.begin(),
6201                 [](Value *V) { return cast<Constant>(V); });
6202       Result = ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
6203     }
6204     break;
6205   case Instruction::FNeg:
6206     Result = SimplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q);
6207     break;
6208   case Instruction::FAdd:
6209     Result = SimplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6210     break;
6211   case Instruction::Add:
6212     Result = SimplifyAddInst(
6213         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6214         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
6215     break;
6216   case Instruction::FSub:
6217     Result = SimplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6218     break;
6219   case Instruction::Sub:
6220     Result = SimplifySubInst(
6221         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6222         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
6223     break;
6224   case Instruction::FMul:
6225     Result = SimplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6226     break;
6227   case Instruction::Mul:
6228     Result = SimplifyMulInst(NewOps[0], NewOps[1], Q);
6229     break;
6230   case Instruction::SDiv:
6231     Result = SimplifySDivInst(NewOps[0], NewOps[1], Q);
6232     break;
6233   case Instruction::UDiv:
6234     Result = SimplifyUDivInst(NewOps[0], NewOps[1], Q);
6235     break;
6236   case Instruction::FDiv:
6237     Result = SimplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6238     break;
6239   case Instruction::SRem:
6240     Result = SimplifySRemInst(NewOps[0], NewOps[1], Q);
6241     break;
6242   case Instruction::URem:
6243     Result = SimplifyURemInst(NewOps[0], NewOps[1], Q);
6244     break;
6245   case Instruction::FRem:
6246     Result = SimplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6247     break;
6248   case Instruction::Shl:
6249     Result = SimplifyShlInst(
6250         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6251         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
6252     break;
6253   case Instruction::LShr:
6254     Result = SimplifyLShrInst(NewOps[0], NewOps[1],
6255                               Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
6256     break;
6257   case Instruction::AShr:
6258     Result = SimplifyAShrInst(NewOps[0], NewOps[1],
6259                               Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
6260     break;
6261   case Instruction::And:
6262     Result = SimplifyAndInst(NewOps[0], NewOps[1], Q);
6263     break;
6264   case Instruction::Or:
6265     Result = SimplifyOrInst(NewOps[0], NewOps[1], Q);
6266     break;
6267   case Instruction::Xor:
6268     Result = SimplifyXorInst(NewOps[0], NewOps[1], Q);
6269     break;
6270   case Instruction::ICmp:
6271     Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
6272                               NewOps[1], Q);
6273     break;
6274   case Instruction::FCmp:
6275     Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
6276                               NewOps[1], I->getFastMathFlags(), Q);
6277     break;
6278   case Instruction::Select:
6279     Result = SimplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q);
6280     break;
6281   case Instruction::GetElementPtr: {
6282     auto *GEPI = cast<GetElementPtrInst>(I);
6283     Result =
6284         SimplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
6285                         makeArrayRef(NewOps).slice(1), GEPI->isInBounds(), Q);
6286     break;
6287   }
6288   case Instruction::InsertValue: {
6289     InsertValueInst *IV = cast<InsertValueInst>(I);
6290     Result = SimplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q);
6291     break;
6292   }
6293   case Instruction::InsertElement: {
6294     Result = SimplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
6295     break;
6296   }
6297   case Instruction::ExtractValue: {
6298     auto *EVI = cast<ExtractValueInst>(I);
6299     Result = SimplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q);
6300     break;
6301   }
6302   case Instruction::ExtractElement: {
6303     Result = SimplifyExtractElementInst(NewOps[0], NewOps[1], Q);
6304     break;
6305   }
6306   case Instruction::ShuffleVector: {
6307     auto *SVI = cast<ShuffleVectorInst>(I);
6308     Result = SimplifyShuffleVectorInst(
6309         NewOps[0], NewOps[1], SVI->getShuffleMask(), SVI->getType(), Q);
6310     break;
6311   }
6312   case Instruction::PHI:
6313     Result = SimplifyPHINode(cast<PHINode>(I), NewOps, Q);
6314     break;
6315   case Instruction::Call: {
6316     // TODO: Use NewOps
6317     Result = SimplifyCall(cast<CallInst>(I), Q);
6318     break;
6319   }
6320   case Instruction::Freeze:
6321     Result = llvm::SimplifyFreezeInst(NewOps[0], Q);
6322     break;
6323 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
6324 #include "llvm/IR/Instruction.def"
6325 #undef HANDLE_CAST_INST
6326     Result = SimplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q);
6327     break;
6328   case Instruction::Alloca:
6329     // No simplifications for Alloca and it can't be constant folded.
6330     Result = nullptr;
6331     break;
6332   case Instruction::Load:
6333     Result = SimplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
6334     break;
6335   }
6336 
6337   /// If called on unreachable code, the above logic may report that the
6338   /// instruction simplified to itself.  Make life easier for users by
6339   /// detecting that case here, returning a safe value instead.
6340   return Result == I ? UndefValue::get(I->getType()) : Result;
6341 }
6342 
6343 Value *llvm::SimplifyInstructionWithOperands(Instruction *I,
6344                                              ArrayRef<Value *> NewOps,
6345                                              const SimplifyQuery &SQ,
6346                                              OptimizationRemarkEmitter *ORE) {
6347   assert(NewOps.size() == I->getNumOperands() &&
6348          "Number of operands should match the instruction!");
6349   return ::simplifyInstructionWithOperands(I, NewOps, SQ, ORE);
6350 }
6351 
6352 Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
6353                                  OptimizationRemarkEmitter *ORE) {
6354   SmallVector<Value *, 8> Ops(I->operands());
6355   return ::simplifyInstructionWithOperands(I, Ops, SQ, ORE);
6356 }
6357 
6358 /// Implementation of recursive simplification through an instruction's
6359 /// uses.
6360 ///
6361 /// This is the common implementation of the recursive simplification routines.
6362 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
6363 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
6364 /// instructions to process and attempt to simplify it using
6365 /// InstructionSimplify. Recursively visited users which could not be
6366 /// simplified themselves are to the optional UnsimplifiedUsers set for
6367 /// further processing by the caller.
6368 ///
6369 /// This routine returns 'true' only when *it* simplifies something. The passed
6370 /// in simplified value does not count toward this.
6371 static bool replaceAndRecursivelySimplifyImpl(
6372     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
6373     const DominatorTree *DT, AssumptionCache *AC,
6374     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
6375   bool Simplified = false;
6376   SmallSetVector<Instruction *, 8> Worklist;
6377   const DataLayout &DL = I->getModule()->getDataLayout();
6378 
6379   // If we have an explicit value to collapse to, do that round of the
6380   // simplification loop by hand initially.
6381   if (SimpleV) {
6382     for (User *U : I->users())
6383       if (U != I)
6384         Worklist.insert(cast<Instruction>(U));
6385 
6386     // Replace the instruction with its simplified value.
6387     I->replaceAllUsesWith(SimpleV);
6388 
6389     // Gracefully handle edge cases where the instruction is not wired into any
6390     // parent block.
6391     if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
6392         !I->mayHaveSideEffects())
6393       I->eraseFromParent();
6394   } else {
6395     Worklist.insert(I);
6396   }
6397 
6398   // Note that we must test the size on each iteration, the worklist can grow.
6399   for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
6400     I = Worklist[Idx];
6401 
6402     // See if this instruction simplifies.
6403     SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
6404     if (!SimpleV) {
6405       if (UnsimplifiedUsers)
6406         UnsimplifiedUsers->insert(I);
6407       continue;
6408     }
6409 
6410     Simplified = true;
6411 
6412     // Stash away all the uses of the old instruction so we can check them for
6413     // recursive simplifications after a RAUW. This is cheaper than checking all
6414     // uses of To on the recursive step in most cases.
6415     for (User *U : I->users())
6416       Worklist.insert(cast<Instruction>(U));
6417 
6418     // Replace the instruction with its simplified value.
6419     I->replaceAllUsesWith(SimpleV);
6420 
6421     // Gracefully handle edge cases where the instruction is not wired into any
6422     // parent block.
6423     if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
6424         !I->mayHaveSideEffects())
6425       I->eraseFromParent();
6426   }
6427   return Simplified;
6428 }
6429 
6430 bool llvm::replaceAndRecursivelySimplify(
6431     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
6432     const DominatorTree *DT, AssumptionCache *AC,
6433     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
6434   assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
6435   assert(SimpleV && "Must provide a simplified value.");
6436   return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
6437                                            UnsimplifiedUsers);
6438 }
6439 
6440 namespace llvm {
6441 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
6442   auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
6443   auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
6444   auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
6445   auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
6446   auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
6447   auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
6448   return {F.getParent()->getDataLayout(), TLI, DT, AC};
6449 }
6450 
6451 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
6452                                          const DataLayout &DL) {
6453   return {DL, &AR.TLI, &AR.DT, &AR.AC};
6454 }
6455 
6456 template <class T, class... TArgs>
6457 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
6458                                          Function &F) {
6459   auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
6460   auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
6461   auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
6462   return {F.getParent()->getDataLayout(), TLI, DT, AC};
6463 }
6464 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
6465                                                   Function &);
6466 }
6467 
6468 void InstSimplifyFolder::anchor() {}
6469