1 //===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements routines for folding instructions into simpler forms
10 // that do not require creating new instructions.  This does constant folding
11 // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
12 // returning a constant ("and i32 %x, 0" -> "0") or an already existing value
13 // ("and i32 %x, %x" -> "%x").  All operands are assumed to have already been
14 // simplified: This is usually true and assuming it simplifies the logic (if
15 // they have not been simplified then results are correct but maybe suboptimal).
16 //
17 //===----------------------------------------------------------------------===//
18 
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/CaptureTracking.h"
26 #include "llvm/Analysis/CmpInstAnalysis.h"
27 #include "llvm/Analysis/ConstantFolding.h"
28 #include "llvm/Analysis/LoopAnalysisManager.h"
29 #include "llvm/Analysis/MemoryBuiltins.h"
30 #include "llvm/Analysis/OverflowInstAnalysis.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/Analysis/VectorUtils.h"
33 #include "llvm/IR/ConstantRange.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/GetElementPtrTypeIterator.h"
37 #include "llvm/IR/GlobalAlias.h"
38 #include "llvm/IR/InstrTypes.h"
39 #include "llvm/IR/Instructions.h"
40 #include "llvm/IR/Operator.h"
41 #include "llvm/IR/PatternMatch.h"
42 #include "llvm/IR/ValueHandle.h"
43 #include "llvm/Support/KnownBits.h"
44 #include <algorithm>
45 using namespace llvm;
46 using namespace llvm::PatternMatch;
47 
48 #define DEBUG_TYPE "instsimplify"
49 
50 enum { RecursionLimit = 3 };
51 
52 STATISTIC(NumExpand,  "Number of expansions");
53 STATISTIC(NumReassoc, "Number of reassociations");
54 
55 static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
56 static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
57 static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
58                              const SimplifyQuery &, unsigned);
59 static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
60                             unsigned);
61 static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
62                             const SimplifyQuery &, unsigned);
63 static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
64                               unsigned);
65 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
66                                const SimplifyQuery &Q, unsigned MaxRecurse);
67 static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
68 static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
69 static Value *SimplifyCastInst(unsigned, Value *, Type *,
70                                const SimplifyQuery &, unsigned);
71 static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
72                               unsigned);
73 static Value *SimplifySelectInst(Value *, Value *, Value *,
74                                  const SimplifyQuery &, unsigned);
75 
76 static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
77                                      Value *FalseVal) {
78   BinaryOperator::BinaryOps BinOpCode;
79   if (auto *BO = dyn_cast<BinaryOperator>(Cond))
80     BinOpCode = BO->getOpcode();
81   else
82     return nullptr;
83 
84   CmpInst::Predicate ExpectedPred, Pred1, Pred2;
85   if (BinOpCode == BinaryOperator::Or) {
86     ExpectedPred = ICmpInst::ICMP_NE;
87   } else if (BinOpCode == BinaryOperator::And) {
88     ExpectedPred = ICmpInst::ICMP_EQ;
89   } else
90     return nullptr;
91 
92   // %A = icmp eq %TV, %FV
93   // %B = icmp eq %X, %Y (and one of these is a select operand)
94   // %C = and %A, %B
95   // %D = select %C, %TV, %FV
96   // -->
97   // %FV
98 
99   // %A = icmp ne %TV, %FV
100   // %B = icmp ne %X, %Y (and one of these is a select operand)
101   // %C = or %A, %B
102   // %D = select %C, %TV, %FV
103   // -->
104   // %TV
105   Value *X, *Y;
106   if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
107                                       m_Specific(FalseVal)),
108                              m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
109       Pred1 != Pred2 || Pred1 != ExpectedPred)
110     return nullptr;
111 
112   if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
113     return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;
114 
115   return nullptr;
116 }
117 
118 /// For a boolean type or a vector of boolean type, return false or a vector
119 /// with every element false.
120 static Constant *getFalse(Type *Ty) {
121   return ConstantInt::getFalse(Ty);
122 }
123 
124 /// For a boolean type or a vector of boolean type, return true or a vector
125 /// with every element true.
126 static Constant *getTrue(Type *Ty) {
127   return ConstantInt::getTrue(Ty);
128 }
129 
130 /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
131 static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
132                           Value *RHS) {
133   CmpInst *Cmp = dyn_cast<CmpInst>(V);
134   if (!Cmp)
135     return false;
136   CmpInst::Predicate CPred = Cmp->getPredicate();
137   Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
138   if (CPred == Pred && CLHS == LHS && CRHS == RHS)
139     return true;
140   return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
141     CRHS == LHS;
142 }
143 
144 /// Simplify comparison with true or false branch of select:
145 ///  %sel = select i1 %cond, i32 %tv, i32 %fv
146 ///  %cmp = icmp sle i32 %sel, %rhs
147 /// Compose new comparison by substituting %sel with either %tv or %fv
148 /// and see if it simplifies.
149 static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
150                                  Value *RHS, Value *Cond,
151                                  const SimplifyQuery &Q, unsigned MaxRecurse,
152                                  Constant *TrueOrFalse) {
153   Value *SimplifiedCmp = SimplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
154   if (SimplifiedCmp == Cond) {
155     // %cmp simplified to the select condition (%cond).
156     return TrueOrFalse;
157   } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
158     // It didn't simplify. However, if composed comparison is equivalent
159     // to the select condition (%cond) then we can replace it.
160     return TrueOrFalse;
161   }
162   return SimplifiedCmp;
163 }
164 
165 /// Simplify comparison with true branch of select
166 static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
167                                      Value *RHS, Value *Cond,
168                                      const SimplifyQuery &Q,
169                                      unsigned MaxRecurse) {
170   return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
171                             getTrue(Cond->getType()));
172 }
173 
174 /// Simplify comparison with false branch of select
175 static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
176                                       Value *RHS, Value *Cond,
177                                       const SimplifyQuery &Q,
178                                       unsigned MaxRecurse) {
179   return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
180                             getFalse(Cond->getType()));
181 }
182 
183 /// We know comparison with both branches of select can be simplified, but they
184 /// are not equal. This routine handles some logical simplifications.
185 static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
186                                                Value *Cond,
187                                                const SimplifyQuery &Q,
188                                                unsigned MaxRecurse) {
189   // If the false value simplified to false, then the result of the compare
190   // is equal to "Cond && TCmp".  This also catches the case when the false
191   // value simplified to false and the true value to true, returning "Cond".
192   if (match(FCmp, m_Zero()))
193     if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
194       return V;
195   // If the true value simplified to true, then the result of the compare
196   // is equal to "Cond || FCmp".
197   if (match(TCmp, m_One()))
198     if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
199       return V;
200   // Finally, if the false value simplified to true and the true value to
201   // false, then the result of the compare is equal to "!Cond".
202   if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
203     if (Value *V = SimplifyXorInst(
204             Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
205       return V;
206   return nullptr;
207 }
208 
209 /// Does the given value dominate the specified phi node?
210 static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
211   Instruction *I = dyn_cast<Instruction>(V);
212   if (!I)
213     // Arguments and constants dominate all instructions.
214     return true;
215 
216   // If we are processing instructions (and/or basic blocks) that have not been
217   // fully added to a function, the parent nodes may still be null. Simply
218   // return the conservative answer in these cases.
219   if (!I->getParent() || !P->getParent() || !I->getFunction())
220     return false;
221 
222   // If we have a DominatorTree then do a precise test.
223   if (DT)
224     return DT->dominates(I, P);
225 
226   // Otherwise, if the instruction is in the entry block and is not an invoke,
227   // then it obviously dominates all phi nodes.
228   if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
229       !isa<CallBrInst>(I))
230     return true;
231 
232   return false;
233 }
234 
235 /// Try to simplify a binary operator of form "V op OtherOp" where V is
236 /// "(B0 opex B1)" by distributing 'op' across 'opex' as
237 /// "(B0 op OtherOp) opex (B1 op OtherOp)".
238 static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
239                           Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
240                           const SimplifyQuery &Q, unsigned MaxRecurse) {
241   auto *B = dyn_cast<BinaryOperator>(V);
242   if (!B || B->getOpcode() != OpcodeToExpand)
243     return nullptr;
244   Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
245   Value *L = SimplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(),
246                            MaxRecurse);
247   if (!L)
248     return nullptr;
249   Value *R = SimplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(),
250                            MaxRecurse);
251   if (!R)
252     return nullptr;
253 
254   // Does the expanded pair of binops simplify to the existing binop?
255   if ((L == B0 && R == B1) ||
256       (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
257     ++NumExpand;
258     return B;
259   }
260 
261   // Otherwise, return "L op' R" if it simplifies.
262   Value *S = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
263   if (!S)
264     return nullptr;
265 
266   ++NumExpand;
267   return S;
268 }
269 
270 /// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
271 /// distributing op over op'.
272 static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode,
273                                      Value *L, Value *R,
274                                      Instruction::BinaryOps OpcodeToExpand,
275                                      const SimplifyQuery &Q,
276                                      unsigned MaxRecurse) {
277   // Recursion is always used, so bail out at once if we already hit the limit.
278   if (!MaxRecurse--)
279     return nullptr;
280 
281   if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
282     return V;
283   if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
284     return V;
285   return nullptr;
286 }
287 
288 /// Generic simplifications for associative binary operations.
289 /// Returns the simpler value, or null if none was found.
290 static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
291                                        Value *LHS, Value *RHS,
292                                        const SimplifyQuery &Q,
293                                        unsigned MaxRecurse) {
294   assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
295 
296   // Recursion is always used, so bail out at once if we already hit the limit.
297   if (!MaxRecurse--)
298     return nullptr;
299 
300   BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
301   BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
302 
303   // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
304   if (Op0 && Op0->getOpcode() == Opcode) {
305     Value *A = Op0->getOperand(0);
306     Value *B = Op0->getOperand(1);
307     Value *C = RHS;
308 
309     // Does "B op C" simplify?
310     if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
311       // It does!  Return "A op V" if it simplifies or is already available.
312       // If V equals B then "A op V" is just the LHS.
313       if (V == B) return LHS;
314       // Otherwise return "A op V" if it simplifies.
315       if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
316         ++NumReassoc;
317         return W;
318       }
319     }
320   }
321 
322   // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
323   if (Op1 && Op1->getOpcode() == Opcode) {
324     Value *A = LHS;
325     Value *B = Op1->getOperand(0);
326     Value *C = Op1->getOperand(1);
327 
328     // Does "A op B" simplify?
329     if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
330       // It does!  Return "V op C" if it simplifies or is already available.
331       // If V equals B then "V op C" is just the RHS.
332       if (V == B) return RHS;
333       // Otherwise return "V op C" if it simplifies.
334       if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
335         ++NumReassoc;
336         return W;
337       }
338     }
339   }
340 
341   // The remaining transforms require commutativity as well as associativity.
342   if (!Instruction::isCommutative(Opcode))
343     return nullptr;
344 
345   // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
346   if (Op0 && Op0->getOpcode() == Opcode) {
347     Value *A = Op0->getOperand(0);
348     Value *B = Op0->getOperand(1);
349     Value *C = RHS;
350 
351     // Does "C op A" simplify?
352     if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
353       // It does!  Return "V op B" if it simplifies or is already available.
354       // If V equals A then "V op B" is just the LHS.
355       if (V == A) return LHS;
356       // Otherwise return "V op B" if it simplifies.
357       if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
358         ++NumReassoc;
359         return W;
360       }
361     }
362   }
363 
364   // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
365   if (Op1 && Op1->getOpcode() == Opcode) {
366     Value *A = LHS;
367     Value *B = Op1->getOperand(0);
368     Value *C = Op1->getOperand(1);
369 
370     // Does "C op A" simplify?
371     if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
372       // It does!  Return "B op V" if it simplifies or is already available.
373       // If V equals C then "B op V" is just the RHS.
374       if (V == C) return RHS;
375       // Otherwise return "B op V" if it simplifies.
376       if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
377         ++NumReassoc;
378         return W;
379       }
380     }
381   }
382 
383   return nullptr;
384 }
385 
386 /// In the case of a binary operation with a select instruction as an operand,
387 /// try to simplify the binop by seeing whether evaluating it on both branches
388 /// of the select results in the same value. Returns the common value if so,
389 /// otherwise returns null.
390 static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
391                                     Value *RHS, const SimplifyQuery &Q,
392                                     unsigned MaxRecurse) {
393   // Recursion is always used, so bail out at once if we already hit the limit.
394   if (!MaxRecurse--)
395     return nullptr;
396 
397   SelectInst *SI;
398   if (isa<SelectInst>(LHS)) {
399     SI = cast<SelectInst>(LHS);
400   } else {
401     assert(isa<SelectInst>(RHS) && "No select instruction operand!");
402     SI = cast<SelectInst>(RHS);
403   }
404 
405   // Evaluate the BinOp on the true and false branches of the select.
406   Value *TV;
407   Value *FV;
408   if (SI == LHS) {
409     TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
410     FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
411   } else {
412     TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
413     FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
414   }
415 
416   // If they simplified to the same value, then return the common value.
417   // If they both failed to simplify then return null.
418   if (TV == FV)
419     return TV;
420 
421   // If one branch simplified to undef, return the other one.
422   if (TV && Q.isUndefValue(TV))
423     return FV;
424   if (FV && Q.isUndefValue(FV))
425     return TV;
426 
427   // If applying the operation did not change the true and false select values,
428   // then the result of the binop is the select itself.
429   if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
430     return SI;
431 
432   // If one branch simplified and the other did not, and the simplified
433   // value is equal to the unsimplified one, return the simplified value.
434   // For example, select (cond, X, X & Z) & Z -> X & Z.
435   if ((FV && !TV) || (TV && !FV)) {
436     // Check that the simplified value has the form "X op Y" where "op" is the
437     // same as the original operation.
438     Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
439     if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
440       // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
441       // We already know that "op" is the same as for the simplified value.  See
442       // if the operands match too.  If so, return the simplified value.
443       Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
444       Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
445       Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
446       if (Simplified->getOperand(0) == UnsimplifiedLHS &&
447           Simplified->getOperand(1) == UnsimplifiedRHS)
448         return Simplified;
449       if (Simplified->isCommutative() &&
450           Simplified->getOperand(1) == UnsimplifiedLHS &&
451           Simplified->getOperand(0) == UnsimplifiedRHS)
452         return Simplified;
453     }
454   }
455 
456   return nullptr;
457 }
458 
459 /// In the case of a comparison with a select instruction, try to simplify the
460 /// comparison by seeing whether both branches of the select result in the same
461 /// value. Returns the common value if so, otherwise returns null.
462 /// For example, if we have:
463 ///  %tmp = select i1 %cmp, i32 1, i32 2
464 ///  %cmp1 = icmp sle i32 %tmp, 3
465 /// We can simplify %cmp1 to true, because both branches of select are
466 /// less than 3. We compose new comparison by substituting %tmp with both
467 /// branches of select and see if it can be simplified.
468 static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
469                                   Value *RHS, const SimplifyQuery &Q,
470                                   unsigned MaxRecurse) {
471   // Recursion is always used, so bail out at once if we already hit the limit.
472   if (!MaxRecurse--)
473     return nullptr;
474 
475   // Make sure the select is on the LHS.
476   if (!isa<SelectInst>(LHS)) {
477     std::swap(LHS, RHS);
478     Pred = CmpInst::getSwappedPredicate(Pred);
479   }
480   assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
481   SelectInst *SI = cast<SelectInst>(LHS);
482   Value *Cond = SI->getCondition();
483   Value *TV = SI->getTrueValue();
484   Value *FV = SI->getFalseValue();
485 
486   // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
487   // Does "cmp TV, RHS" simplify?
488   Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
489   if (!TCmp)
490     return nullptr;
491 
492   // Does "cmp FV, RHS" simplify?
493   Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
494   if (!FCmp)
495     return nullptr;
496 
497   // If both sides simplified to the same value, then use it as the result of
498   // the original comparison.
499   if (TCmp == FCmp)
500     return TCmp;
501 
502   // The remaining cases only make sense if the select condition has the same
503   // type as the result of the comparison, so bail out if this is not so.
504   if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
505     return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);
506 
507   return nullptr;
508 }
509 
510 /// In the case of a binary operation with an operand that is a PHI instruction,
511 /// try to simplify the binop by seeing whether evaluating it on the incoming
512 /// phi values yields the same result for every value. If so returns the common
513 /// value, otherwise returns null.
514 static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
515                                  Value *RHS, const SimplifyQuery &Q,
516                                  unsigned MaxRecurse) {
517   // Recursion is always used, so bail out at once if we already hit the limit.
518   if (!MaxRecurse--)
519     return nullptr;
520 
521   PHINode *PI;
522   if (isa<PHINode>(LHS)) {
523     PI = cast<PHINode>(LHS);
524     // Bail out if RHS and the phi may be mutually interdependent due to a loop.
525     if (!valueDominatesPHI(RHS, PI, Q.DT))
526       return nullptr;
527   } else {
528     assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
529     PI = cast<PHINode>(RHS);
530     // Bail out if LHS and the phi may be mutually interdependent due to a loop.
531     if (!valueDominatesPHI(LHS, PI, Q.DT))
532       return nullptr;
533   }
534 
535   // Evaluate the BinOp on the incoming phi values.
536   Value *CommonValue = nullptr;
537   for (Value *Incoming : PI->incoming_values()) {
538     // If the incoming value is the phi node itself, it can safely be skipped.
539     if (Incoming == PI) continue;
540     Value *V = PI == LHS ?
541       SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
542       SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
543     // If the operation failed to simplify, or simplified to a different value
544     // to previously, then give up.
545     if (!V || (CommonValue && V != CommonValue))
546       return nullptr;
547     CommonValue = V;
548   }
549 
550   return CommonValue;
551 }
552 
553 /// In the case of a comparison with a PHI instruction, try to simplify the
554 /// comparison by seeing whether comparing with all of the incoming phi values
555 /// yields the same result every time. If so returns the common result,
556 /// otherwise returns null.
557 static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
558                                const SimplifyQuery &Q, unsigned MaxRecurse) {
559   // Recursion is always used, so bail out at once if we already hit the limit.
560   if (!MaxRecurse--)
561     return nullptr;
562 
563   // Make sure the phi is on the LHS.
564   if (!isa<PHINode>(LHS)) {
565     std::swap(LHS, RHS);
566     Pred = CmpInst::getSwappedPredicate(Pred);
567   }
568   assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
569   PHINode *PI = cast<PHINode>(LHS);
570 
571   // Bail out if RHS and the phi may be mutually interdependent due to a loop.
572   if (!valueDominatesPHI(RHS, PI, Q.DT))
573     return nullptr;
574 
575   // Evaluate the BinOp on the incoming phi values.
576   Value *CommonValue = nullptr;
577   for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
578     Value *Incoming = PI->getIncomingValue(u);
579     Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
580     // If the incoming value is the phi node itself, it can safely be skipped.
581     if (Incoming == PI) continue;
582     // Change the context instruction to the "edge" that flows into the phi.
583     // This is important because that is where incoming is actually "evaluated"
584     // even though it is used later somewhere else.
585     Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
586                                MaxRecurse);
587     // If the operation failed to simplify, or simplified to a different value
588     // to previously, then give up.
589     if (!V || (CommonValue && V != CommonValue))
590       return nullptr;
591     CommonValue = V;
592   }
593 
594   return CommonValue;
595 }
596 
597 static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
598                                        Value *&Op0, Value *&Op1,
599                                        const SimplifyQuery &Q) {
600   if (auto *CLHS = dyn_cast<Constant>(Op0)) {
601     if (auto *CRHS = dyn_cast<Constant>(Op1))
602       return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
603 
604     // Canonicalize the constant to the RHS if this is a commutative operation.
605     if (Instruction::isCommutative(Opcode))
606       std::swap(Op0, Op1);
607   }
608   return nullptr;
609 }
610 
611 /// Given operands for an Add, see if we can fold the result.
612 /// If not, this returns null.
613 static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
614                               const SimplifyQuery &Q, unsigned MaxRecurse) {
615   if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
616     return C;
617 
618   // X + undef -> undef
619   if (Q.isUndefValue(Op1))
620     return Op1;
621 
622   // X + 0 -> X
623   if (match(Op1, m_Zero()))
624     return Op0;
625 
626   // If two operands are negative, return 0.
627   if (isKnownNegation(Op0, Op1))
628     return Constant::getNullValue(Op0->getType());
629 
630   // X + (Y - X) -> Y
631   // (Y - X) + X -> Y
632   // Eg: X + -X -> 0
633   Value *Y = nullptr;
634   if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
635       match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
636     return Y;
637 
638   // X + ~X -> -1   since   ~X = -X-1
639   Type *Ty = Op0->getType();
640   if (match(Op0, m_Not(m_Specific(Op1))) ||
641       match(Op1, m_Not(m_Specific(Op0))))
642     return Constant::getAllOnesValue(Ty);
643 
644   // add nsw/nuw (xor Y, signmask), signmask --> Y
645   // The no-wrapping add guarantees that the top bit will be set by the add.
646   // Therefore, the xor must be clearing the already set sign bit of Y.
647   if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
648       match(Op0, m_Xor(m_Value(Y), m_SignMask())))
649     return Y;
650 
651   // add nuw %x, -1  ->  -1, because %x can only be 0.
652   if (IsNUW && match(Op1, m_AllOnes()))
653     return Op1; // Which is -1.
654 
655   /// i1 add -> xor.
656   if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
657     if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
658       return V;
659 
660   // Try some generic simplifications for associative operations.
661   if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
662                                           MaxRecurse))
663     return V;
664 
665   // Threading Add over selects and phi nodes is pointless, so don't bother.
666   // Threading over the select in "A + select(cond, B, C)" means evaluating
667   // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
668   // only if B and C are equal.  If B and C are equal then (since we assume
669   // that operands have already been simplified) "select(cond, B, C)" should
670   // have been simplified to the common value of B and C already.  Analysing
671   // "A+B" and "A+C" thus gains nothing, but costs compile time.  Similarly
672   // for threading over phi nodes.
673 
674   return nullptr;
675 }
676 
677 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
678                              const SimplifyQuery &Query) {
679   return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
680 }
681 
682 /// Compute the base pointer and cumulative constant offsets for V.
683 ///
684 /// This strips all constant offsets off of V, leaving it the base pointer, and
685 /// accumulates the total constant offset applied in the returned constant. It
686 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
687 /// no constant offsets applied.
688 ///
689 /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
690 /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
691 /// folding.
692 static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
693                                                 bool AllowNonInbounds = false) {
694   assert(V->getType()->isPtrOrPtrVectorTy());
695 
696   Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
697   APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());
698 
699   V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
700   // As that strip may trace through `addrspacecast`, need to sext or trunc
701   // the offset calculated.
702   IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
703   Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());
704 
705   Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
706   if (VectorType *VecTy = dyn_cast<VectorType>(V->getType()))
707     return ConstantVector::getSplat(VecTy->getElementCount(), OffsetIntPtr);
708   return OffsetIntPtr;
709 }
710 
711 /// Compute the constant difference between two pointer values.
712 /// If the difference is not a constant, returns zero.
713 static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
714                                           Value *RHS) {
715   Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
716   Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
717 
718   // If LHS and RHS are not related via constant offsets to the same base
719   // value, there is nothing we can do here.
720   if (LHS != RHS)
721     return nullptr;
722 
723   // Otherwise, the difference of LHS - RHS can be computed as:
724   //    LHS - RHS
725   //  = (LHSOffset + Base) - (RHSOffset + Base)
726   //  = LHSOffset - RHSOffset
727   return ConstantExpr::getSub(LHSOffset, RHSOffset);
728 }
729 
730 /// Given operands for a Sub, see if we can fold the result.
731 /// If not, this returns null.
732 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
733                               const SimplifyQuery &Q, unsigned MaxRecurse) {
734   if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
735     return C;
736 
737   // X - undef -> undef
738   // undef - X -> undef
739   if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
740     return UndefValue::get(Op0->getType());
741 
742   // X - 0 -> X
743   if (match(Op1, m_Zero()))
744     return Op0;
745 
746   // X - X -> 0
747   if (Op0 == Op1)
748     return Constant::getNullValue(Op0->getType());
749 
750   // Is this a negation?
751   if (match(Op0, m_Zero())) {
752     // 0 - X -> 0 if the sub is NUW.
753     if (isNUW)
754       return Constant::getNullValue(Op0->getType());
755 
756     KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
757     if (Known.Zero.isMaxSignedValue()) {
758       // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
759       // Op1 must be 0 because negating the minimum signed value is undefined.
760       if (isNSW)
761         return Constant::getNullValue(Op0->getType());
762 
763       // 0 - X -> X if X is 0 or the minimum signed value.
764       return Op1;
765     }
766   }
767 
768   // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
769   // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
770   Value *X = nullptr, *Y = nullptr, *Z = Op1;
771   if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
772     // See if "V === Y - Z" simplifies.
773     if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
774       // It does!  Now see if "X + V" simplifies.
775       if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
776         // It does, we successfully reassociated!
777         ++NumReassoc;
778         return W;
779       }
780     // See if "V === X - Z" simplifies.
781     if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
782       // It does!  Now see if "Y + V" simplifies.
783       if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
784         // It does, we successfully reassociated!
785         ++NumReassoc;
786         return W;
787       }
788   }
789 
790   // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
791   // For example, X - (X + 1) -> -1
792   X = Op0;
793   if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
794     // See if "V === X - Y" simplifies.
795     if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
796       // It does!  Now see if "V - Z" simplifies.
797       if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
798         // It does, we successfully reassociated!
799         ++NumReassoc;
800         return W;
801       }
802     // See if "V === X - Z" simplifies.
803     if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
804       // It does!  Now see if "V - Y" simplifies.
805       if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
806         // It does, we successfully reassociated!
807         ++NumReassoc;
808         return W;
809       }
810   }
811 
812   // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
813   // For example, X - (X - Y) -> Y.
814   Z = Op0;
815   if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
816     // See if "V === Z - X" simplifies.
817     if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
818       // It does!  Now see if "V + Y" simplifies.
819       if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
820         // It does, we successfully reassociated!
821         ++NumReassoc;
822         return W;
823       }
824 
825   // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
826   if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
827       match(Op1, m_Trunc(m_Value(Y))))
828     if (X->getType() == Y->getType())
829       // See if "V === X - Y" simplifies.
830       if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
831         // It does!  Now see if "trunc V" simplifies.
832         if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
833                                         Q, MaxRecurse - 1))
834           // It does, return the simplified "trunc V".
835           return W;
836 
837   // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
838   if (match(Op0, m_PtrToInt(m_Value(X))) &&
839       match(Op1, m_PtrToInt(m_Value(Y))))
840     if (Constant *Result = computePointerDifference(Q.DL, X, Y))
841       return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
842 
843   // i1 sub -> xor.
844   if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
845     if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
846       return V;
847 
848   // Threading Sub over selects and phi nodes is pointless, so don't bother.
849   // Threading over the select in "A - select(cond, B, C)" means evaluating
850   // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
851   // only if B and C are equal.  If B and C are equal then (since we assume
852   // that operands have already been simplified) "select(cond, B, C)" should
853   // have been simplified to the common value of B and C already.  Analysing
854   // "A-B" and "A-C" thus gains nothing, but costs compile time.  Similarly
855   // for threading over phi nodes.
856 
857   return nullptr;
858 }
859 
860 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
861                              const SimplifyQuery &Q) {
862   return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
863 }
864 
865 /// Given operands for a Mul, see if we can fold the result.
866 /// If not, this returns null.
867 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
868                               unsigned MaxRecurse) {
869   if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
870     return C;
871 
872   // X * undef -> 0
873   // X * 0 -> 0
874   if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
875     return Constant::getNullValue(Op0->getType());
876 
877   // X * 1 -> X
878   if (match(Op1, m_One()))
879     return Op0;
880 
881   // (X / Y) * Y -> X if the division is exact.
882   Value *X = nullptr;
883   if (Q.IIQ.UseInstrInfo &&
884       (match(Op0,
885              m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
886        match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
887     return X;
888 
889   // i1 mul -> and.
890   if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
891     if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
892       return V;
893 
894   // Try some generic simplifications for associative operations.
895   if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
896                                           MaxRecurse))
897     return V;
898 
899   // Mul distributes over Add. Try some generic simplifications based on this.
900   if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
901                                         Instruction::Add, Q, MaxRecurse))
902     return V;
903 
904   // If the operation is with the result of a select instruction, check whether
905   // operating on either branch of the select always yields the same value.
906   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
907     if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
908                                          MaxRecurse))
909       return V;
910 
911   // If the operation is with the result of a phi instruction, check whether
912   // operating on all incoming values of the phi always yields the same value.
913   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
914     if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
915                                       MaxRecurse))
916       return V;
917 
918   return nullptr;
919 }
920 
921 Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
922   return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
923 }
924 
925 /// Check for common or similar folds of integer division or integer remainder.
926 /// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
927 static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
928                              Value *Op1, const SimplifyQuery &Q) {
929   bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
930   bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
931 
932   Type *Ty = Op0->getType();
933 
934   // X / undef -> poison
935   // X % undef -> poison
936   if (Q.isUndefValue(Op1))
937     return PoisonValue::get(Ty);
938 
939   // X / 0 -> poison
940   // X % 0 -> poison
941   // We don't need to preserve faults!
942   if (match(Op1, m_Zero()))
943     return PoisonValue::get(Ty);
944 
945   // If any element of a constant divisor fixed width vector is zero or undef
946   // the behavior is undefined and we can fold the whole op to poison.
947   auto *Op1C = dyn_cast<Constant>(Op1);
948   auto *VTy = dyn_cast<FixedVectorType>(Ty);
949   if (Op1C && VTy) {
950     unsigned NumElts = VTy->getNumElements();
951     for (unsigned i = 0; i != NumElts; ++i) {
952       Constant *Elt = Op1C->getAggregateElement(i);
953       if (Elt && (Elt->isNullValue() || Q.isUndefValue(Elt)))
954         return PoisonValue::get(Ty);
955     }
956   }
957 
958   // undef / X -> 0
959   // undef % X -> 0
960   if (Q.isUndefValue(Op0))
961     return Constant::getNullValue(Ty);
962 
963   // 0 / X -> 0
964   // 0 % X -> 0
965   if (match(Op0, m_Zero()))
966     return Constant::getNullValue(Op0->getType());
967 
968   // X / X -> 1
969   // X % X -> 0
970   if (Op0 == Op1)
971     return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
972 
973   // X / 1 -> X
974   // X % 1 -> 0
975   // If this is a boolean op (single-bit element type), we can't have
976   // division-by-zero or remainder-by-zero, so assume the divisor is 1.
977   // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
978   Value *X;
979   if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) ||
980       (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
981     return IsDiv ? Op0 : Constant::getNullValue(Ty);
982 
983   // If X * Y does not overflow, then:
984   //   X * Y / Y -> X
985   //   X * Y % Y -> 0
986   if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
987     auto *Mul = cast<OverflowingBinaryOperator>(Op0);
988     // The multiplication can't overflow if it is defined not to, or if
989     // X == A / Y for some A.
990     if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
991         (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
992         (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
993         (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
994       return IsDiv ? X : Constant::getNullValue(Op0->getType());
995     }
996   }
997 
998   return nullptr;
999 }
1000 
1001 /// Given a predicate and two operands, return true if the comparison is true.
1002 /// This is a helper for div/rem simplification where we return some other value
1003 /// when we can prove a relationship between the operands.
1004 static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
1005                        const SimplifyQuery &Q, unsigned MaxRecurse) {
1006   Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
1007   Constant *C = dyn_cast_or_null<Constant>(V);
1008   return (C && C->isAllOnesValue());
1009 }
1010 
1011 /// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
1012 /// to simplify X % Y to X.
1013 static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
1014                       unsigned MaxRecurse, bool IsSigned) {
1015   // Recursion is always used, so bail out at once if we already hit the limit.
1016   if (!MaxRecurse--)
1017     return false;
1018 
1019   if (IsSigned) {
1020     // |X| / |Y| --> 0
1021     //
1022     // We require that 1 operand is a simple constant. That could be extended to
1023     // 2 variables if we computed the sign bit for each.
1024     //
1025     // Make sure that a constant is not the minimum signed value because taking
1026     // the abs() of that is undefined.
1027     Type *Ty = X->getType();
1028     const APInt *C;
1029     if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
1030       // Is the variable divisor magnitude always greater than the constant
1031       // dividend magnitude?
1032       // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
1033       Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
1034       Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
1035       if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
1036           isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
1037         return true;
1038     }
1039     if (match(Y, m_APInt(C))) {
1040       // Special-case: we can't take the abs() of a minimum signed value. If
1041       // that's the divisor, then all we have to do is prove that the dividend
1042       // is also not the minimum signed value.
1043       if (C->isMinSignedValue())
1044         return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);
1045 
1046       // Is the variable dividend magnitude always less than the constant
1047       // divisor magnitude?
1048       // |X| < |C| --> X > -abs(C) and X < abs(C)
1049       Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
1050       Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1051       if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
1052           isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
1053         return true;
1054     }
1055     return false;
1056   }
1057 
1058   // IsSigned == false.
1059   // Is the dividend unsigned less than the divisor?
1060   return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
1061 }
1062 
1063 /// These are simplifications common to SDiv and UDiv.
1064 static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1065                           const SimplifyQuery &Q, unsigned MaxRecurse) {
1066   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1067     return C;
1068 
1069   if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q))
1070     return V;
1071 
1072   bool IsSigned = Opcode == Instruction::SDiv;
1073 
1074   // (X rem Y) / Y -> 0
1075   if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1076       (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1077     return Constant::getNullValue(Op0->getType());
1078 
1079   // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
1080   ConstantInt *C1, *C2;
1081   if (!IsSigned && match(Op0, m_UDiv(m_Value(), m_ConstantInt(C1))) &&
1082       match(Op1, m_ConstantInt(C2))) {
1083     bool Overflow;
1084     (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
1085     if (Overflow)
1086       return Constant::getNullValue(Op0->getType());
1087   }
1088 
1089   // If the operation is with the result of a select instruction, check whether
1090   // operating on either branch of the select always yields the same value.
1091   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1092     if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1093       return V;
1094 
1095   // If the operation is with the result of a phi instruction, check whether
1096   // operating on all incoming values of the phi always yields the same value.
1097   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1098     if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1099       return V;
1100 
1101   if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1102     return Constant::getNullValue(Op0->getType());
1103 
1104   return nullptr;
1105 }
1106 
1107 /// These are simplifications common to SRem and URem.
1108 static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1109                           const SimplifyQuery &Q, unsigned MaxRecurse) {
1110   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1111     return C;
1112 
1113   if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q))
1114     return V;
1115 
1116   // (X % Y) % Y -> X % Y
1117   if ((Opcode == Instruction::SRem &&
1118        match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1119       (Opcode == Instruction::URem &&
1120        match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1121     return Op0;
1122 
1123   // (X << Y) % X -> 0
1124   if (Q.IIQ.UseInstrInfo &&
1125       ((Opcode == Instruction::SRem &&
1126         match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
1127        (Opcode == Instruction::URem &&
1128         match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
1129     return Constant::getNullValue(Op0->getType());
1130 
1131   // If the operation is with the result of a select instruction, check whether
1132   // operating on either branch of the select always yields the same value.
1133   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1134     if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1135       return V;
1136 
1137   // If the operation is with the result of a phi instruction, check whether
1138   // operating on all incoming values of the phi always yields the same value.
1139   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1140     if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1141       return V;
1142 
1143   // If X / Y == 0, then X % Y == X.
1144   if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
1145     return Op0;
1146 
1147   return nullptr;
1148 }
1149 
1150 /// Given operands for an SDiv, see if we can fold the result.
1151 /// If not, this returns null.
1152 static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1153                                unsigned MaxRecurse) {
1154   // If two operands are negated and no signed overflow, return -1.
1155   if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
1156     return Constant::getAllOnesValue(Op0->getType());
1157 
1158   return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse);
1159 }
1160 
1161 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1162   return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
1163 }
1164 
1165 /// Given operands for a UDiv, see if we can fold the result.
1166 /// If not, this returns null.
1167 static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1168                                unsigned MaxRecurse) {
1169   return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse);
1170 }
1171 
1172 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1173   return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
1174 }
1175 
1176 /// Given operands for an SRem, see if we can fold the result.
1177 /// If not, this returns null.
1178 static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1179                                unsigned MaxRecurse) {
1180   // If the divisor is 0, the result is undefined, so assume the divisor is -1.
1181   // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
1182   Value *X;
1183   if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1184     return ConstantInt::getNullValue(Op0->getType());
1185 
1186   // If the two operands are negated, return 0.
1187   if (isKnownNegation(Op0, Op1))
1188     return ConstantInt::getNullValue(Op0->getType());
1189 
1190   return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1191 }
1192 
1193 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1194   return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
1195 }
1196 
1197 /// Given operands for a URem, see if we can fold the result.
1198 /// If not, this returns null.
1199 static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1200                                unsigned MaxRecurse) {
1201   return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1202 }
1203 
1204 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1205   return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
1206 }
1207 
1208 /// Returns true if a shift by \c Amount always yields poison.
1209 static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
1210   Constant *C = dyn_cast<Constant>(Amount);
1211   if (!C)
1212     return false;
1213 
1214   // X shift by undef -> poison because it may shift by the bitwidth.
1215   if (Q.isUndefValue(C))
1216     return true;
1217 
1218   // Shifting by the bitwidth or more is undefined.
1219   if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
1220     if (CI->getValue().uge(CI->getType()->getScalarSizeInBits()))
1221       return true;
1222 
1223   // If all lanes of a vector shift are undefined the whole shift is.
1224   if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1225     for (unsigned I = 0,
1226                   E = cast<FixedVectorType>(C->getType())->getNumElements();
1227          I != E; ++I)
1228       if (!isPoisonShift(C->getAggregateElement(I), Q))
1229         return false;
1230     return true;
1231   }
1232 
1233   return false;
1234 }
1235 
1236 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1237 /// If not, this returns null.
1238 static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1239                             Value *Op1, bool IsNSW, const SimplifyQuery &Q,
1240                             unsigned MaxRecurse) {
1241   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1242     return C;
1243 
1244   // 0 shift by X -> 0
1245   if (match(Op0, m_Zero()))
1246     return Constant::getNullValue(Op0->getType());
1247 
1248   // X shift by 0 -> X
1249   // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1250   // would be poison.
1251   Value *X;
1252   if (match(Op1, m_Zero()) ||
1253       (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1254     return Op0;
1255 
1256   // Fold undefined shifts.
1257   if (isPoisonShift(Op1, Q))
1258     return PoisonValue::get(Op0->getType());
1259 
1260   // If the operation is with the result of a select instruction, check whether
1261   // operating on either branch of the select always yields the same value.
1262   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1263     if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1264       return V;
1265 
1266   // If the operation is with the result of a phi instruction, check whether
1267   // operating on all incoming values of the phi always yields the same value.
1268   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1269     if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1270       return V;
1271 
1272   // If any bits in the shift amount make that value greater than or equal to
1273   // the number of bits in the type, the shift is undefined.
1274   KnownBits KnownAmt = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1275   if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
1276     return PoisonValue::get(Op0->getType());
1277 
1278   // If all valid bits in the shift amount are known zero, the first operand is
1279   // unchanged.
1280   unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
1281   if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
1282     return Op0;
1283 
1284   // Check for nsw shl leading to a poison value.
1285   if (IsNSW) {
1286     assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
1287     KnownBits KnownVal = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1288     KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);
1289 
1290     if (KnownVal.Zero.isSignBitSet())
1291       KnownShl.Zero.setSignBit();
1292     if (KnownVal.One.isSignBitSet())
1293       KnownShl.One.setSignBit();
1294 
1295     if (KnownShl.hasConflict())
1296       return PoisonValue::get(Op0->getType());
1297   }
1298 
1299   return nullptr;
1300 }
1301 
1302 /// Given operands for an Shl, LShr or AShr, see if we can
1303 /// fold the result.  If not, this returns null.
1304 static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1305                                  Value *Op1, bool isExact, const SimplifyQuery &Q,
1306                                  unsigned MaxRecurse) {
1307   if (Value *V =
1308           SimplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
1309     return V;
1310 
1311   // X >> X -> 0
1312   if (Op0 == Op1)
1313     return Constant::getNullValue(Op0->getType());
1314 
1315   // undef >> X -> 0
1316   // undef >> X -> undef (if it's exact)
1317   if (Q.isUndefValue(Op0))
1318     return isExact ? Op0 : Constant::getNullValue(Op0->getType());
1319 
1320   // The low bit cannot be shifted out of an exact shift if it is set.
1321   if (isExact) {
1322     KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
1323     if (Op0Known.One[0])
1324       return Op0;
1325   }
1326 
1327   return nullptr;
1328 }
1329 
1330 /// Given operands for an Shl, see if we can fold the result.
1331 /// If not, this returns null.
1332 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1333                               const SimplifyQuery &Q, unsigned MaxRecurse) {
1334   if (Value *V =
1335           SimplifyShift(Instruction::Shl, Op0, Op1, isNSW, Q, MaxRecurse))
1336     return V;
1337 
1338   // undef << X -> 0
1339   // undef << X -> undef if (if it's NSW/NUW)
1340   if (Q.isUndefValue(Op0))
1341     return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
1342 
1343   // (X >> A) << A -> X
1344   Value *X;
1345   if (Q.IIQ.UseInstrInfo &&
1346       match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1347     return X;
1348 
1349   // shl nuw i8 C, %x  ->  C  iff C has sign bit set.
1350   if (isNUW && match(Op0, m_Negative()))
1351     return Op0;
1352   // NOTE: could use computeKnownBits() / LazyValueInfo,
1353   // but the cost-benefit analysis suggests it isn't worth it.
1354 
1355   return nullptr;
1356 }
1357 
1358 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1359                              const SimplifyQuery &Q) {
1360   return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
1361 }
1362 
1363 /// Given operands for an LShr, see if we can fold the result.
1364 /// If not, this returns null.
1365 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1366                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1367   if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
1368                                     MaxRecurse))
1369       return V;
1370 
1371   // (X << A) >> A -> X
1372   Value *X;
1373   if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1374     return X;
1375 
1376   // ((X << A) | Y) >> A -> X  if effective width of Y is not larger than A.
1377   // We can return X as we do in the above case since OR alters no bits in X.
1378   // SimplifyDemandedBits in InstCombine can do more general optimization for
1379   // bit manipulation. This pattern aims to provide opportunities for other
1380   // optimizers by supporting a simple but common case in InstSimplify.
1381   Value *Y;
1382   const APInt *ShRAmt, *ShLAmt;
1383   if (match(Op1, m_APInt(ShRAmt)) &&
1384       match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1385       *ShRAmt == *ShLAmt) {
1386     const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1387     const unsigned Width = Op0->getType()->getScalarSizeInBits();
1388     const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
1389     if (ShRAmt->uge(EffWidthY))
1390       return X;
1391   }
1392 
1393   return nullptr;
1394 }
1395 
1396 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1397                               const SimplifyQuery &Q) {
1398   return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1399 }
1400 
1401 /// Given operands for an AShr, see if we can fold the result.
1402 /// If not, this returns null.
1403 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1404                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1405   if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
1406                                     MaxRecurse))
1407     return V;
1408 
1409   // all ones >>a X -> -1
1410   // Do not return Op0 because it may contain undef elements if it's a vector.
1411   if (match(Op0, m_AllOnes()))
1412     return Constant::getAllOnesValue(Op0->getType());
1413 
1414   // (X << A) >> A -> X
1415   Value *X;
1416   if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1417     return X;
1418 
1419   // Arithmetic shifting an all-sign-bit value is a no-op.
1420   unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1421   if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1422     return Op0;
1423 
1424   return nullptr;
1425 }
1426 
1427 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1428                               const SimplifyQuery &Q) {
1429   return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1430 }
1431 
1432 /// Commuted variants are assumed to be handled by calling this function again
1433 /// with the parameters swapped.
1434 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1435                                          ICmpInst *UnsignedICmp, bool IsAnd,
1436                                          const SimplifyQuery &Q) {
1437   Value *X, *Y;
1438 
1439   ICmpInst::Predicate EqPred;
1440   if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1441       !ICmpInst::isEquality(EqPred))
1442     return nullptr;
1443 
1444   ICmpInst::Predicate UnsignedPred;
1445 
1446   Value *A, *B;
1447   // Y = (A - B);
1448   if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1449     if (match(UnsignedICmp,
1450               m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1451         ICmpInst::isUnsigned(UnsignedPred)) {
1452       // A >=/<= B || (A - B) != 0  <-->  true
1453       if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1454            UnsignedPred == ICmpInst::ICMP_ULE) &&
1455           EqPred == ICmpInst::ICMP_NE && !IsAnd)
1456         return ConstantInt::getTrue(UnsignedICmp->getType());
1457       // A </> B && (A - B) == 0  <-->  false
1458       if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1459            UnsignedPred == ICmpInst::ICMP_UGT) &&
1460           EqPred == ICmpInst::ICMP_EQ && IsAnd)
1461         return ConstantInt::getFalse(UnsignedICmp->getType());
1462 
1463       // A </> B && (A - B) != 0  <-->  A </> B
1464       // A </> B || (A - B) != 0  <-->  (A - B) != 0
1465       if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1466                                           UnsignedPred == ICmpInst::ICMP_UGT))
1467         return IsAnd ? UnsignedICmp : ZeroICmp;
1468 
1469       // A <=/>= B && (A - B) == 0  <-->  (A - B) == 0
1470       // A <=/>= B || (A - B) == 0  <-->  A <=/>= B
1471       if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1472                                           UnsignedPred == ICmpInst::ICMP_UGE))
1473         return IsAnd ? ZeroICmp : UnsignedICmp;
1474     }
1475 
1476     // Given  Y = (A - B)
1477     //   Y >= A && Y != 0  --> Y >= A  iff B != 0
1478     //   Y <  A || Y == 0  --> Y <  A  iff B != 0
1479     if (match(UnsignedICmp,
1480               m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1481       if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1482           EqPred == ICmpInst::ICMP_NE &&
1483           isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1484         return UnsignedICmp;
1485       if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1486           EqPred == ICmpInst::ICMP_EQ &&
1487           isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1488         return UnsignedICmp;
1489     }
1490   }
1491 
1492   if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1493       ICmpInst::isUnsigned(UnsignedPred))
1494     ;
1495   else if (match(UnsignedICmp,
1496                  m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1497            ICmpInst::isUnsigned(UnsignedPred))
1498     UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1499   else
1500     return nullptr;
1501 
1502   // X > Y && Y == 0  -->  Y == 0  iff X != 0
1503   // X > Y || Y == 0  -->  X > Y   iff X != 0
1504   if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1505       isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1506     return IsAnd ? ZeroICmp : UnsignedICmp;
1507 
1508   // X <= Y && Y != 0  -->  X <= Y  iff X != 0
1509   // X <= Y || Y != 0  -->  Y != 0  iff X != 0
1510   if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1511       isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1512     return IsAnd ? UnsignedICmp : ZeroICmp;
1513 
1514   // The transforms below here are expected to be handled more generally with
1515   // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1516   // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1517   // these are candidates for removal.
1518 
1519   // X < Y && Y != 0  -->  X < Y
1520   // X < Y || Y != 0  -->  Y != 0
1521   if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1522     return IsAnd ? UnsignedICmp : ZeroICmp;
1523 
1524   // X >= Y && Y == 0  -->  Y == 0
1525   // X >= Y || Y == 0  -->  X >= Y
1526   if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1527     return IsAnd ? ZeroICmp : UnsignedICmp;
1528 
1529   // X < Y && Y == 0  -->  false
1530   if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1531       IsAnd)
1532     return getFalse(UnsignedICmp->getType());
1533 
1534   // X >= Y || Y != 0  -->  true
1535   if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1536       !IsAnd)
1537     return getTrue(UnsignedICmp->getType());
1538 
1539   return nullptr;
1540 }
1541 
1542 /// Commuted variants are assumed to be handled by calling this function again
1543 /// with the parameters swapped.
1544 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1545   ICmpInst::Predicate Pred0, Pred1;
1546   Value *A ,*B;
1547   if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1548       !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1549     return nullptr;
1550 
1551   // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1552   // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1553   // can eliminate Op1 from this 'and'.
1554   if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1555     return Op0;
1556 
1557   // Check for any combination of predicates that are guaranteed to be disjoint.
1558   if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1559       (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1560       (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1561       (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1562     return getFalse(Op0->getType());
1563 
1564   return nullptr;
1565 }
1566 
1567 /// Commuted variants are assumed to be handled by calling this function again
1568 /// with the parameters swapped.
1569 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1570   ICmpInst::Predicate Pred0, Pred1;
1571   Value *A ,*B;
1572   if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1573       !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1574     return nullptr;
1575 
1576   // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
1577   // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1578   // can eliminate Op0 from this 'or'.
1579   if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1580     return Op1;
1581 
1582   // Check for any combination of predicates that cover the entire range of
1583   // possibilities.
1584   if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1585       (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
1586       (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
1587       (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
1588     return getTrue(Op0->getType());
1589 
1590   return nullptr;
1591 }
1592 
1593 /// Test if a pair of compares with a shared operand and 2 constants has an
1594 /// empty set intersection, full set union, or if one compare is a superset of
1595 /// the other.
1596 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1597                                                 bool IsAnd) {
1598   // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1599   if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1600     return nullptr;
1601 
1602   const APInt *C0, *C1;
1603   if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1604       !match(Cmp1->getOperand(1), m_APInt(C1)))
1605     return nullptr;
1606 
1607   auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1608   auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1609 
1610   // For and-of-compares, check if the intersection is empty:
1611   // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1612   if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1613     return getFalse(Cmp0->getType());
1614 
1615   // For or-of-compares, check if the union is full:
1616   // (icmp X, C0) || (icmp X, C1) --> full set --> true
1617   if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1618     return getTrue(Cmp0->getType());
1619 
1620   // Is one range a superset of the other?
1621   // If this is and-of-compares, take the smaller set:
1622   // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1623   // If this is or-of-compares, take the larger set:
1624   // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1625   if (Range0.contains(Range1))
1626     return IsAnd ? Cmp1 : Cmp0;
1627   if (Range1.contains(Range0))
1628     return IsAnd ? Cmp0 : Cmp1;
1629 
1630   return nullptr;
1631 }
1632 
1633 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
1634                                            bool IsAnd) {
1635   ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate();
1636   if (!match(Cmp0->getOperand(1), m_Zero()) ||
1637       !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1)
1638     return nullptr;
1639 
1640   if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ))
1641     return nullptr;
1642 
1643   // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)".
1644   Value *X = Cmp0->getOperand(0);
1645   Value *Y = Cmp1->getOperand(0);
1646 
1647   // If one of the compares is a masked version of a (not) null check, then
1648   // that compare implies the other, so we eliminate the other. Optionally, look
1649   // through a pointer-to-int cast to match a null check of a pointer type.
1650 
1651   // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0
1652   // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0
1653   // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0
1654   // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0
1655   if (match(Y, m_c_And(m_Specific(X), m_Value())) ||
1656       match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value())))
1657     return Cmp1;
1658 
1659   // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0
1660   // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0
1661   // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0
1662   // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0
1663   if (match(X, m_c_And(m_Specific(Y), m_Value())) ||
1664       match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value())))
1665     return Cmp0;
1666 
1667   return nullptr;
1668 }
1669 
1670 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1671                                         const InstrInfoQuery &IIQ) {
1672   // (icmp (add V, C0), C1) & (icmp V, C0)
1673   ICmpInst::Predicate Pred0, Pred1;
1674   const APInt *C0, *C1;
1675   Value *V;
1676   if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1677     return nullptr;
1678 
1679   if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1680     return nullptr;
1681 
1682   auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1683   if (AddInst->getOperand(1) != Op1->getOperand(1))
1684     return nullptr;
1685 
1686   Type *ITy = Op0->getType();
1687   bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1688   bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1689 
1690   const APInt Delta = *C1 - *C0;
1691   if (C0->isStrictlyPositive()) {
1692     if (Delta == 2) {
1693       if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1694         return getFalse(ITy);
1695       if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1696         return getFalse(ITy);
1697     }
1698     if (Delta == 1) {
1699       if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1700         return getFalse(ITy);
1701       if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1702         return getFalse(ITy);
1703     }
1704   }
1705   if (C0->getBoolValue() && isNUW) {
1706     if (Delta == 2)
1707       if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1708         return getFalse(ITy);
1709     if (Delta == 1)
1710       if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1711         return getFalse(ITy);
1712   }
1713 
1714   return nullptr;
1715 }
1716 
1717 /// Try to eliminate compares with signed or unsigned min/max constants.
1718 static Value *simplifyAndOrOfICmpsWithLimitConst(ICmpInst *Cmp0, ICmpInst *Cmp1,
1719                                                  bool IsAnd) {
1720   // Canonicalize an equality compare as Cmp0.
1721   if (Cmp1->isEquality())
1722     std::swap(Cmp0, Cmp1);
1723   if (!Cmp0->isEquality())
1724     return nullptr;
1725 
1726   // The non-equality compare must include a common operand (X). Canonicalize
1727   // the common operand as operand 0 (the predicate is swapped if the common
1728   // operand was operand 1).
1729   ICmpInst::Predicate Pred0 = Cmp0->getPredicate();
1730   Value *X = Cmp0->getOperand(0);
1731   ICmpInst::Predicate Pred1;
1732   bool HasNotOp = match(Cmp1, m_c_ICmp(Pred1, m_Not(m_Specific(X)), m_Value()));
1733   if (!HasNotOp && !match(Cmp1, m_c_ICmp(Pred1, m_Specific(X), m_Value())))
1734     return nullptr;
1735   if (ICmpInst::isEquality(Pred1))
1736     return nullptr;
1737 
1738   // The equality compare must be against a constant. Flip bits if we matched
1739   // a bitwise not. Convert a null pointer constant to an integer zero value.
1740   APInt MinMaxC;
1741   const APInt *C;
1742   if (match(Cmp0->getOperand(1), m_APInt(C)))
1743     MinMaxC = HasNotOp ? ~*C : *C;
1744   else if (isa<ConstantPointerNull>(Cmp0->getOperand(1)))
1745     MinMaxC = APInt::getNullValue(8);
1746   else
1747     return nullptr;
1748 
1749   // DeMorganize if this is 'or': P0 || P1 --> !P0 && !P1.
1750   if (!IsAnd) {
1751     Pred0 = ICmpInst::getInversePredicate(Pred0);
1752     Pred1 = ICmpInst::getInversePredicate(Pred1);
1753   }
1754 
1755   // Normalize to unsigned compare and unsigned min/max value.
1756   // Example for 8-bit: -128 + 128 -> 0; 127 + 128 -> 255
1757   if (ICmpInst::isSigned(Pred1)) {
1758     Pred1 = ICmpInst::getUnsignedPredicate(Pred1);
1759     MinMaxC += APInt::getSignedMinValue(MinMaxC.getBitWidth());
1760   }
1761 
1762   // (X != MAX) && (X < Y) --> X < Y
1763   // (X == MAX) || (X >= Y) --> X >= Y
1764   if (MinMaxC.isMaxValue())
1765     if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT)
1766       return Cmp1;
1767 
1768   // (X != MIN) && (X > Y) -->  X > Y
1769   // (X == MIN) || (X <= Y) --> X <= Y
1770   if (MinMaxC.isMinValue())
1771     if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_UGT)
1772       return Cmp1;
1773 
1774   return nullptr;
1775 }
1776 
1777 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1778                                  const SimplifyQuery &Q) {
1779   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1780     return X;
1781   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1782     return X;
1783 
1784   if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
1785     return X;
1786   if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0))
1787     return X;
1788 
1789   if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1790     return X;
1791 
1792   if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, true))
1793     return X;
1794 
1795   if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
1796     return X;
1797 
1798   if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1799     return X;
1800   if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1801     return X;
1802 
1803   return nullptr;
1804 }
1805 
1806 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1807                                        const InstrInfoQuery &IIQ) {
1808   // (icmp (add V, C0), C1) | (icmp V, C0)
1809   ICmpInst::Predicate Pred0, Pred1;
1810   const APInt *C0, *C1;
1811   Value *V;
1812   if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1813     return nullptr;
1814 
1815   if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1816     return nullptr;
1817 
1818   auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1819   if (AddInst->getOperand(1) != Op1->getOperand(1))
1820     return nullptr;
1821 
1822   Type *ITy = Op0->getType();
1823   bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1824   bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1825 
1826   const APInt Delta = *C1 - *C0;
1827   if (C0->isStrictlyPositive()) {
1828     if (Delta == 2) {
1829       if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1830         return getTrue(ITy);
1831       if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1832         return getTrue(ITy);
1833     }
1834     if (Delta == 1) {
1835       if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1836         return getTrue(ITy);
1837       if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1838         return getTrue(ITy);
1839     }
1840   }
1841   if (C0->getBoolValue() && isNUW) {
1842     if (Delta == 2)
1843       if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1844         return getTrue(ITy);
1845     if (Delta == 1)
1846       if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1847         return getTrue(ITy);
1848   }
1849 
1850   return nullptr;
1851 }
1852 
1853 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1854                                 const SimplifyQuery &Q) {
1855   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1856     return X;
1857   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1858     return X;
1859 
1860   if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
1861     return X;
1862   if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0))
1863     return X;
1864 
1865   if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1866     return X;
1867 
1868   if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, false))
1869     return X;
1870 
1871   if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
1872     return X;
1873 
1874   if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1875     return X;
1876   if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1877     return X;
1878 
1879   return nullptr;
1880 }
1881 
1882 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI,
1883                                    FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
1884   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1885   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1886   if (LHS0->getType() != RHS0->getType())
1887     return nullptr;
1888 
1889   FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1890   if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1891       (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1892     // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1893     // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1894     // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1895     // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1896     // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1897     // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1898     // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1899     // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
1900     if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
1901         (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1)))
1902       return RHS;
1903 
1904     // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1905     // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1906     // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1907     // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1908     // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1909     // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1910     // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1911     // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1912     if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
1913         (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1)))
1914       return LHS;
1915   }
1916 
1917   return nullptr;
1918 }
1919 
1920 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q,
1921                                   Value *Op0, Value *Op1, bool IsAnd) {
1922   // Look through casts of the 'and' operands to find compares.
1923   auto *Cast0 = dyn_cast<CastInst>(Op0);
1924   auto *Cast1 = dyn_cast<CastInst>(Op1);
1925   if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1926       Cast0->getSrcTy() == Cast1->getSrcTy()) {
1927     Op0 = Cast0->getOperand(0);
1928     Op1 = Cast1->getOperand(0);
1929   }
1930 
1931   Value *V = nullptr;
1932   auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1933   auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1934   if (ICmp0 && ICmp1)
1935     V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1936               : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1937 
1938   auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1939   auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1940   if (FCmp0 && FCmp1)
1941     V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd);
1942 
1943   if (!V)
1944     return nullptr;
1945   if (!Cast0)
1946     return V;
1947 
1948   // If we looked through casts, we can only handle a constant simplification
1949   // because we are not allowed to create a cast instruction here.
1950   if (auto *C = dyn_cast<Constant>(V))
1951     return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
1952 
1953   return nullptr;
1954 }
1955 
1956 /// Given a bitwise logic op, check if the operands are add/sub with a common
1957 /// source value and inverted constant (identity: C - X -> ~(X + ~C)).
1958 static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
1959                                     Instruction::BinaryOps Opcode) {
1960   assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
1961   assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
1962   Value *X;
1963   Constant *C1, *C2;
1964   if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
1965        match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
1966       (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
1967        match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
1968     if (ConstantExpr::getNot(C1) == C2) {
1969       // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
1970       // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
1971       // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
1972       Type *Ty = Op0->getType();
1973       return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
1974                                         : ConstantInt::getAllOnesValue(Ty);
1975     }
1976   }
1977   return nullptr;
1978 }
1979 
1980 /// Given operands for an And, see if we can fold the result.
1981 /// If not, this returns null.
1982 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1983                               unsigned MaxRecurse) {
1984   if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
1985     return C;
1986 
1987   // X & undef -> 0
1988   if (Q.isUndefValue(Op1))
1989     return Constant::getNullValue(Op0->getType());
1990 
1991   // X & X = X
1992   if (Op0 == Op1)
1993     return Op0;
1994 
1995   // X & 0 = 0
1996   if (match(Op1, m_Zero()))
1997     return Constant::getNullValue(Op0->getType());
1998 
1999   // X & -1 = X
2000   if (match(Op1, m_AllOnes()))
2001     return Op0;
2002 
2003   // A & ~A  =  ~A & A  =  0
2004   if (match(Op0, m_Not(m_Specific(Op1))) ||
2005       match(Op1, m_Not(m_Specific(Op0))))
2006     return Constant::getNullValue(Op0->getType());
2007 
2008   // (A | ?) & A = A
2009   if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2010     return Op1;
2011 
2012   // A & (A | ?) = A
2013   if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
2014     return Op0;
2015 
2016   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2017     return V;
2018 
2019   // A mask that only clears known zeros of a shifted value is a no-op.
2020   Value *X;
2021   const APInt *Mask;
2022   const APInt *ShAmt;
2023   if (match(Op1, m_APInt(Mask))) {
2024     // If all bits in the inverted and shifted mask are clear:
2025     // and (shl X, ShAmt), Mask --> shl X, ShAmt
2026     if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2027         (~(*Mask)).lshr(*ShAmt).isNullValue())
2028       return Op0;
2029 
2030     // If all bits in the inverted and shifted mask are clear:
2031     // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2032     if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2033         (~(*Mask)).shl(*ShAmt).isNullValue())
2034       return Op0;
2035   }
2036 
2037   // If we have a multiplication overflow check that is being 'and'ed with a
2038   // check that one of the multipliers is not zero, we can omit the 'and', and
2039   // only keep the overflow check.
2040   if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2041     return Op1;
2042   if (isCheckForZeroAndMulWithOverflow(Op1, Op0, true))
2043     return Op0;
2044 
2045   // A & (-A) = A if A is a power of two or zero.
2046   if (match(Op0, m_Neg(m_Specific(Op1))) ||
2047       match(Op1, m_Neg(m_Specific(Op0)))) {
2048     if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2049                                Q.DT))
2050       return Op0;
2051     if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2052                                Q.DT))
2053       return Op1;
2054   }
2055 
2056   // This is a similar pattern used for checking if a value is a power-of-2:
2057   // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2058   // A & (A - 1) --> 0 (if A is a power-of-2 or 0)
2059   if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2060       isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2061     return Constant::getNullValue(Op1->getType());
2062   if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) &&
2063       isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2064     return Constant::getNullValue(Op0->getType());
2065 
2066   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2067     return V;
2068 
2069   // Try some generic simplifications for associative operations.
2070   if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
2071                                           MaxRecurse))
2072     return V;
2073 
2074   // And distributes over Or.  Try some generic simplifications based on this.
2075   if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2076                                         Instruction::Or, Q, MaxRecurse))
2077     return V;
2078 
2079   // And distributes over Xor.  Try some generic simplifications based on this.
2080   if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2081                                         Instruction::Xor, Q, MaxRecurse))
2082     return V;
2083 
2084   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2085     if (Op0->getType()->isIntOrIntVectorTy(1)) {
2086       // A & (A && B) -> A && B
2087       if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2088         return Op1;
2089       else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2090         return Op0;
2091     }
2092     // If the operation is with the result of a select instruction, check
2093     // whether operating on either branch of the select always yields the same
2094     // value.
2095     if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
2096                                          MaxRecurse))
2097       return V;
2098   }
2099 
2100   // If the operation is with the result of a phi instruction, check whether
2101   // operating on all incoming values of the phi always yields the same value.
2102   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2103     if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
2104                                       MaxRecurse))
2105       return V;
2106 
2107   // Assuming the effective width of Y is not larger than A, i.e. all bits
2108   // from X and Y are disjoint in (X << A) | Y,
2109   // if the mask of this AND op covers all bits of X or Y, while it covers
2110   // no bits from the other, we can bypass this AND op. E.g.,
2111   // ((X << A) | Y) & Mask -> Y,
2112   //     if Mask = ((1 << effective_width_of(Y)) - 1)
2113   // ((X << A) | Y) & Mask -> X << A,
2114   //     if Mask = ((1 << effective_width_of(X)) - 1) << A
2115   // SimplifyDemandedBits in InstCombine can optimize the general case.
2116   // This pattern aims to help other passes for a common case.
2117   Value *Y, *XShifted;
2118   if (match(Op1, m_APInt(Mask)) &&
2119       match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2120                                      m_Value(XShifted)),
2121                         m_Value(Y)))) {
2122     const unsigned Width = Op0->getType()->getScalarSizeInBits();
2123     const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2124     const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2125     const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
2126     if (EffWidthY <= ShftCnt) {
2127       const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
2128                                                 Q.DT);
2129       const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros();
2130       const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2131       const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2132       // If the mask is extracting all bits from X or Y as is, we can skip
2133       // this AND op.
2134       if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2135         return Y;
2136       if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2137         return XShifted;
2138     }
2139   }
2140 
2141   return nullptr;
2142 }
2143 
2144 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2145   return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
2146 }
2147 
2148 /// Given operands for an Or, see if we can fold the result.
2149 /// If not, this returns null.
2150 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2151                              unsigned MaxRecurse) {
2152   if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2153     return C;
2154 
2155   // X | undef -> -1
2156   // X | -1 = -1
2157   // Do not return Op1 because it may contain undef elements if it's a vector.
2158   if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2159     return Constant::getAllOnesValue(Op0->getType());
2160 
2161   // X | X = X
2162   // X | 0 = X
2163   if (Op0 == Op1 || match(Op1, m_Zero()))
2164     return Op0;
2165 
2166   // A | ~A  =  ~A | A  =  -1
2167   if (match(Op0, m_Not(m_Specific(Op1))) ||
2168       match(Op1, m_Not(m_Specific(Op0))))
2169     return Constant::getAllOnesValue(Op0->getType());
2170 
2171   // (A & ?) | A = A
2172   if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
2173     return Op1;
2174 
2175   // A | (A & ?) = A
2176   if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
2177     return Op0;
2178 
2179   // ~(A & ?) | A = -1
2180   if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
2181     return Constant::getAllOnesValue(Op1->getType());
2182 
2183   // A | ~(A & ?) = -1
2184   if (match(Op1, m_Not(m_c_And(m_Specific(Op0), m_Value()))))
2185     return Constant::getAllOnesValue(Op0->getType());
2186 
2187   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2188     return V;
2189 
2190   Value *A, *B, *NotA;
2191   // (A & ~B) | (A ^ B) -> (A ^ B)
2192   // (~B & A) | (A ^ B) -> (A ^ B)
2193   // (A & ~B) | (B ^ A) -> (B ^ A)
2194   // (~B & A) | (B ^ A) -> (B ^ A)
2195   if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
2196       (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2197        match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2198     return Op1;
2199 
2200   // Commute the 'or' operands.
2201   // (A ^ B) | (A & ~B) -> (A ^ B)
2202   // (A ^ B) | (~B & A) -> (A ^ B)
2203   // (B ^ A) | (A & ~B) -> (B ^ A)
2204   // (B ^ A) | (~B & A) -> (B ^ A)
2205   if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2206       (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2207        match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2208     return Op0;
2209 
2210   // (A & B) | (~A ^ B) -> (~A ^ B)
2211   // (B & A) | (~A ^ B) -> (~A ^ B)
2212   // (A & B) | (B ^ ~A) -> (B ^ ~A)
2213   // (B & A) | (B ^ ~A) -> (B ^ ~A)
2214   if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2215       (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2216        match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2217     return Op1;
2218 
2219   // Commute the 'or' operands.
2220   // (~A ^ B) | (A & B) -> (~A ^ B)
2221   // (~A ^ B) | (B & A) -> (~A ^ B)
2222   // (B ^ ~A) | (A & B) -> (B ^ ~A)
2223   // (B ^ ~A) | (B & A) -> (B ^ ~A)
2224   if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
2225       (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2226        match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2227     return Op0;
2228 
2229   // (~A & B) | ~(A | B) --> ~A
2230   // (~A & B) | ~(B | A) --> ~A
2231   // (B & ~A) | ~(A | B) --> ~A
2232   // (B & ~A) | ~(B | A) --> ~A
2233   if (match(Op0, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2234                          m_Value(B))) &&
2235       match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2236     return NotA;
2237 
2238   // Commute the 'or' operands.
2239   // ~(A | B) | (~A & B) --> ~A
2240   // ~(B | A) | (~A & B) --> ~A
2241   // ~(A | B) | (B & ~A) --> ~A
2242   // ~(B | A) | (B & ~A) --> ~A
2243   if (match(Op1, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2244                          m_Value(B))) &&
2245       match(Op0, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2246     return NotA;
2247 
2248   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2249     return V;
2250 
2251   // If we have a multiplication overflow check that is being 'and'ed with a
2252   // check that one of the multipliers is not zero, we can omit the 'and', and
2253   // only keep the overflow check.
2254   if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2255     return Op1;
2256   if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2257     return Op0;
2258 
2259   // Try some generic simplifications for associative operations.
2260   if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
2261                                           MaxRecurse))
2262     return V;
2263 
2264   // Or distributes over And.  Try some generic simplifications based on this.
2265   if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2266                                         Instruction::And, Q, MaxRecurse))
2267     return V;
2268 
2269   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2270     if (Op0->getType()->isIntOrIntVectorTy(1)) {
2271       // A | (A || B) -> A || B
2272       if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2273         return Op1;
2274       else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2275         return Op0;
2276     }
2277     // If the operation is with the result of a select instruction, check
2278     // whether operating on either branch of the select always yields the same
2279     // value.
2280     if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
2281                                          MaxRecurse))
2282       return V;
2283   }
2284 
2285   // (A & C1)|(B & C2)
2286   const APInt *C1, *C2;
2287   if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2288       match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2289     if (*C1 == ~*C2) {
2290       // (A & C1)|(B & C2)
2291       // If we have: ((V + N) & C1) | (V & C2)
2292       // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2293       // replace with V+N.
2294       Value *N;
2295       if (C2->isMask() && // C2 == 0+1+
2296           match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2297         // Add commutes, try both ways.
2298         if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2299           return A;
2300       }
2301       // Or commutes, try both ways.
2302       if (C1->isMask() &&
2303           match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2304         // Add commutes, try both ways.
2305         if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2306           return B;
2307       }
2308     }
2309   }
2310 
2311   // If the operation is with the result of a phi instruction, check whether
2312   // operating on all incoming values of the phi always yields the same value.
2313   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2314     if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2315       return V;
2316 
2317   return nullptr;
2318 }
2319 
2320 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2321   return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit);
2322 }
2323 
2324 /// Given operands for a Xor, see if we can fold the result.
2325 /// If not, this returns null.
2326 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2327                               unsigned MaxRecurse) {
2328   if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2329     return C;
2330 
2331   // A ^ undef -> undef
2332   if (Q.isUndefValue(Op1))
2333     return Op1;
2334 
2335   // A ^ 0 = A
2336   if (match(Op1, m_Zero()))
2337     return Op0;
2338 
2339   // A ^ A = 0
2340   if (Op0 == Op1)
2341     return Constant::getNullValue(Op0->getType());
2342 
2343   // A ^ ~A  =  ~A ^ A  =  -1
2344   if (match(Op0, m_Not(m_Specific(Op1))) ||
2345       match(Op1, m_Not(m_Specific(Op0))))
2346     return Constant::getAllOnesValue(Op0->getType());
2347 
2348   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2349     return V;
2350 
2351   // Try some generic simplifications for associative operations.
2352   if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
2353                                           MaxRecurse))
2354     return V;
2355 
2356   // Threading Xor over selects and phi nodes is pointless, so don't bother.
2357   // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2358   // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2359   // only if B and C are equal.  If B and C are equal then (since we assume
2360   // that operands have already been simplified) "select(cond, B, C)" should
2361   // have been simplified to the common value of B and C already.  Analysing
2362   // "A^B" and "A^C" thus gains nothing, but costs compile time.  Similarly
2363   // for threading over phi nodes.
2364 
2365   return nullptr;
2366 }
2367 
2368 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2369   return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
2370 }
2371 
2372 
2373 static Type *GetCompareTy(Value *Op) {
2374   return CmpInst::makeCmpResultType(Op->getType());
2375 }
2376 
2377 /// Rummage around inside V looking for something equivalent to the comparison
2378 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2379 /// Helper function for analyzing max/min idioms.
2380 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2381                                          Value *LHS, Value *RHS) {
2382   SelectInst *SI = dyn_cast<SelectInst>(V);
2383   if (!SI)
2384     return nullptr;
2385   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2386   if (!Cmp)
2387     return nullptr;
2388   Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2389   if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2390     return Cmp;
2391   if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2392       LHS == CmpRHS && RHS == CmpLHS)
2393     return Cmp;
2394   return nullptr;
2395 }
2396 
2397 // A significant optimization not implemented here is assuming that alloca
2398 // addresses are not equal to incoming argument values. They don't *alias*,
2399 // as we say, but that doesn't mean they aren't equal, so we take a
2400 // conservative approach.
2401 //
2402 // This is inspired in part by C++11 5.10p1:
2403 //   "Two pointers of the same type compare equal if and only if they are both
2404 //    null, both point to the same function, or both represent the same
2405 //    address."
2406 //
2407 // This is pretty permissive.
2408 //
2409 // It's also partly due to C11 6.5.9p6:
2410 //   "Two pointers compare equal if and only if both are null pointers, both are
2411 //    pointers to the same object (including a pointer to an object and a
2412 //    subobject at its beginning) or function, both are pointers to one past the
2413 //    last element of the same array object, or one is a pointer to one past the
2414 //    end of one array object and the other is a pointer to the start of a
2415 //    different array object that happens to immediately follow the first array
2416 //    object in the address space.)
2417 //
2418 // C11's version is more restrictive, however there's no reason why an argument
2419 // couldn't be a one-past-the-end value for a stack object in the caller and be
2420 // equal to the beginning of a stack object in the callee.
2421 //
2422 // If the C and C++ standards are ever made sufficiently restrictive in this
2423 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2424 // this optimization.
2425 static Constant *
2426 computePointerICmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2427                    const SimplifyQuery &Q) {
2428   const DataLayout &DL = Q.DL;
2429   const TargetLibraryInfo *TLI = Q.TLI;
2430   const DominatorTree *DT = Q.DT;
2431   const Instruction *CxtI = Q.CxtI;
2432   const InstrInfoQuery &IIQ = Q.IIQ;
2433 
2434   // First, skip past any trivial no-ops.
2435   LHS = LHS->stripPointerCasts();
2436   RHS = RHS->stripPointerCasts();
2437 
2438   // A non-null pointer is not equal to a null pointer.
2439   if (isa<ConstantPointerNull>(RHS) && ICmpInst::isEquality(Pred) &&
2440       llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
2441                            IIQ.UseInstrInfo))
2442     return ConstantInt::get(GetCompareTy(LHS),
2443                             !CmpInst::isTrueWhenEqual(Pred));
2444 
2445   // We can only fold certain predicates on pointer comparisons.
2446   switch (Pred) {
2447   default:
2448     return nullptr;
2449 
2450     // Equality comaprisons are easy to fold.
2451   case CmpInst::ICMP_EQ:
2452   case CmpInst::ICMP_NE:
2453     break;
2454 
2455     // We can only handle unsigned relational comparisons because 'inbounds' on
2456     // a GEP only protects against unsigned wrapping.
2457   case CmpInst::ICMP_UGT:
2458   case CmpInst::ICMP_UGE:
2459   case CmpInst::ICMP_ULT:
2460   case CmpInst::ICMP_ULE:
2461     // However, we have to switch them to their signed variants to handle
2462     // negative indices from the base pointer.
2463     Pred = ICmpInst::getSignedPredicate(Pred);
2464     break;
2465   }
2466 
2467   // Strip off any constant offsets so that we can reason about them.
2468   // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2469   // here and compare base addresses like AliasAnalysis does, however there are
2470   // numerous hazards. AliasAnalysis and its utilities rely on special rules
2471   // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2472   // doesn't need to guarantee pointer inequality when it says NoAlias.
2473   Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2474   Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2475 
2476   // If LHS and RHS are related via constant offsets to the same base
2477   // value, we can replace it with an icmp which just compares the offsets.
2478   if (LHS == RHS)
2479     return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2480 
2481   // Various optimizations for (in)equality comparisons.
2482   if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2483     // Different non-empty allocations that exist at the same time have
2484     // different addresses (if the program can tell). Global variables always
2485     // exist, so they always exist during the lifetime of each other and all
2486     // allocas. Two different allocas usually have different addresses...
2487     //
2488     // However, if there's an @llvm.stackrestore dynamically in between two
2489     // allocas, they may have the same address. It's tempting to reduce the
2490     // scope of the problem by only looking at *static* allocas here. That would
2491     // cover the majority of allocas while significantly reducing the likelihood
2492     // of having an @llvm.stackrestore pop up in the middle. However, it's not
2493     // actually impossible for an @llvm.stackrestore to pop up in the middle of
2494     // an entry block. Also, if we have a block that's not attached to a
2495     // function, we can't tell if it's "static" under the current definition.
2496     // Theoretically, this problem could be fixed by creating a new kind of
2497     // instruction kind specifically for static allocas. Such a new instruction
2498     // could be required to be at the top of the entry block, thus preventing it
2499     // from being subject to a @llvm.stackrestore. Instcombine could even
2500     // convert regular allocas into these special allocas. It'd be nifty.
2501     // However, until then, this problem remains open.
2502     //
2503     // So, we'll assume that two non-empty allocas have different addresses
2504     // for now.
2505     //
2506     // With all that, if the offsets are within the bounds of their allocations
2507     // (and not one-past-the-end! so we can't use inbounds!), and their
2508     // allocations aren't the same, the pointers are not equal.
2509     //
2510     // Note that it's not necessary to check for LHS being a global variable
2511     // address, due to canonicalization and constant folding.
2512     if (isa<AllocaInst>(LHS) &&
2513         (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2514       ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2515       ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2516       uint64_t LHSSize, RHSSize;
2517       ObjectSizeOpts Opts;
2518       Opts.NullIsUnknownSize =
2519           NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
2520       if (LHSOffsetCI && RHSOffsetCI &&
2521           getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2522           getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2523         const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2524         const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2525         if (!LHSOffsetValue.isNegative() &&
2526             !RHSOffsetValue.isNegative() &&
2527             LHSOffsetValue.ult(LHSSize) &&
2528             RHSOffsetValue.ult(RHSSize)) {
2529           return ConstantInt::get(GetCompareTy(LHS),
2530                                   !CmpInst::isTrueWhenEqual(Pred));
2531         }
2532       }
2533 
2534       // Repeat the above check but this time without depending on DataLayout
2535       // or being able to compute a precise size.
2536       if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2537           !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2538           LHSOffset->isNullValue() &&
2539           RHSOffset->isNullValue())
2540         return ConstantInt::get(GetCompareTy(LHS),
2541                                 !CmpInst::isTrueWhenEqual(Pred));
2542     }
2543 
2544     // Even if an non-inbounds GEP occurs along the path we can still optimize
2545     // equality comparisons concerning the result. We avoid walking the whole
2546     // chain again by starting where the last calls to
2547     // stripAndComputeConstantOffsets left off and accumulate the offsets.
2548     Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2549     Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2550     if (LHS == RHS)
2551       return ConstantExpr::getICmp(Pred,
2552                                    ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2553                                    ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2554 
2555     // If one side of the equality comparison must come from a noalias call
2556     // (meaning a system memory allocation function), and the other side must
2557     // come from a pointer that cannot overlap with dynamically-allocated
2558     // memory within the lifetime of the current function (allocas, byval
2559     // arguments, globals), then determine the comparison result here.
2560     SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2561     getUnderlyingObjects(LHS, LHSUObjs);
2562     getUnderlyingObjects(RHS, RHSUObjs);
2563 
2564     // Is the set of underlying objects all noalias calls?
2565     auto IsNAC = [](ArrayRef<const Value *> Objects) {
2566       return all_of(Objects, isNoAliasCall);
2567     };
2568 
2569     // Is the set of underlying objects all things which must be disjoint from
2570     // noalias calls. For allocas, we consider only static ones (dynamic
2571     // allocas might be transformed into calls to malloc not simultaneously
2572     // live with the compared-to allocation). For globals, we exclude symbols
2573     // that might be resolve lazily to symbols in another dynamically-loaded
2574     // library (and, thus, could be malloc'ed by the implementation).
2575     auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2576       return all_of(Objects, [](const Value *V) {
2577         if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2578           return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2579         if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2580           return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2581                   GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2582                  !GV->isThreadLocal();
2583         if (const Argument *A = dyn_cast<Argument>(V))
2584           return A->hasByValAttr();
2585         return false;
2586       });
2587     };
2588 
2589     if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2590         (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2591         return ConstantInt::get(GetCompareTy(LHS),
2592                                 !CmpInst::isTrueWhenEqual(Pred));
2593 
2594     // Fold comparisons for non-escaping pointer even if the allocation call
2595     // cannot be elided. We cannot fold malloc comparison to null. Also, the
2596     // dynamic allocation call could be either of the operands.
2597     Value *MI = nullptr;
2598     if (isAllocLikeFn(LHS, TLI) &&
2599         llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2600       MI = LHS;
2601     else if (isAllocLikeFn(RHS, TLI) &&
2602              llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2603       MI = RHS;
2604     // FIXME: We should also fold the compare when the pointer escapes, but the
2605     // compare dominates the pointer escape
2606     if (MI && !PointerMayBeCaptured(MI, true, true))
2607       return ConstantInt::get(GetCompareTy(LHS),
2608                               CmpInst::isFalseWhenEqual(Pred));
2609   }
2610 
2611   // Otherwise, fail.
2612   return nullptr;
2613 }
2614 
2615 /// Fold an icmp when its operands have i1 scalar type.
2616 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2617                                   Value *RHS, const SimplifyQuery &Q) {
2618   Type *ITy = GetCompareTy(LHS); // The return type.
2619   Type *OpTy = LHS->getType();   // The operand type.
2620   if (!OpTy->isIntOrIntVectorTy(1))
2621     return nullptr;
2622 
2623   // A boolean compared to true/false can be simplified in 14 out of the 20
2624   // (10 predicates * 2 constants) possible combinations. Cases not handled here
2625   // require a 'not' of the LHS, so those must be transformed in InstCombine.
2626   if (match(RHS, m_Zero())) {
2627     switch (Pred) {
2628     case CmpInst::ICMP_NE:  // X !=  0 -> X
2629     case CmpInst::ICMP_UGT: // X >u  0 -> X
2630     case CmpInst::ICMP_SLT: // X <s  0 -> X
2631       return LHS;
2632 
2633     case CmpInst::ICMP_ULT: // X <u  0 -> false
2634     case CmpInst::ICMP_SGT: // X >s  0 -> false
2635       return getFalse(ITy);
2636 
2637     case CmpInst::ICMP_UGE: // X >=u 0 -> true
2638     case CmpInst::ICMP_SLE: // X <=s 0 -> true
2639       return getTrue(ITy);
2640 
2641     default: break;
2642     }
2643   } else if (match(RHS, m_One())) {
2644     switch (Pred) {
2645     case CmpInst::ICMP_EQ:  // X ==   1 -> X
2646     case CmpInst::ICMP_UGE: // X >=u  1 -> X
2647     case CmpInst::ICMP_SLE: // X <=s -1 -> X
2648       return LHS;
2649 
2650     case CmpInst::ICMP_UGT: // X >u   1 -> false
2651     case CmpInst::ICMP_SLT: // X <s  -1 -> false
2652       return getFalse(ITy);
2653 
2654     case CmpInst::ICMP_ULE: // X <=u  1 -> true
2655     case CmpInst::ICMP_SGE: // X >=s -1 -> true
2656       return getTrue(ITy);
2657 
2658     default: break;
2659     }
2660   }
2661 
2662   switch (Pred) {
2663   default:
2664     break;
2665   case ICmpInst::ICMP_UGE:
2666     if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2667       return getTrue(ITy);
2668     break;
2669   case ICmpInst::ICMP_SGE:
2670     /// For signed comparison, the values for an i1 are 0 and -1
2671     /// respectively. This maps into a truth table of:
2672     /// LHS | RHS | LHS >=s RHS   | LHS implies RHS
2673     ///  0  |  0  |  1 (0 >= 0)   |  1
2674     ///  0  |  1  |  1 (0 >= -1)  |  1
2675     ///  1  |  0  |  0 (-1 >= 0)  |  0
2676     ///  1  |  1  |  1 (-1 >= -1) |  1
2677     if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2678       return getTrue(ITy);
2679     break;
2680   case ICmpInst::ICMP_ULE:
2681     if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2682       return getTrue(ITy);
2683     break;
2684   }
2685 
2686   return nullptr;
2687 }
2688 
2689 /// Try hard to fold icmp with zero RHS because this is a common case.
2690 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2691                                    Value *RHS, const SimplifyQuery &Q) {
2692   if (!match(RHS, m_Zero()))
2693     return nullptr;
2694 
2695   Type *ITy = GetCompareTy(LHS); // The return type.
2696   switch (Pred) {
2697   default:
2698     llvm_unreachable("Unknown ICmp predicate!");
2699   case ICmpInst::ICMP_ULT:
2700     return getFalse(ITy);
2701   case ICmpInst::ICMP_UGE:
2702     return getTrue(ITy);
2703   case ICmpInst::ICMP_EQ:
2704   case ICmpInst::ICMP_ULE:
2705     if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2706       return getFalse(ITy);
2707     break;
2708   case ICmpInst::ICMP_NE:
2709   case ICmpInst::ICMP_UGT:
2710     if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2711       return getTrue(ITy);
2712     break;
2713   case ICmpInst::ICMP_SLT: {
2714     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2715     if (LHSKnown.isNegative())
2716       return getTrue(ITy);
2717     if (LHSKnown.isNonNegative())
2718       return getFalse(ITy);
2719     break;
2720   }
2721   case ICmpInst::ICMP_SLE: {
2722     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2723     if (LHSKnown.isNegative())
2724       return getTrue(ITy);
2725     if (LHSKnown.isNonNegative() &&
2726         isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2727       return getFalse(ITy);
2728     break;
2729   }
2730   case ICmpInst::ICMP_SGE: {
2731     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2732     if (LHSKnown.isNegative())
2733       return getFalse(ITy);
2734     if (LHSKnown.isNonNegative())
2735       return getTrue(ITy);
2736     break;
2737   }
2738   case ICmpInst::ICMP_SGT: {
2739     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2740     if (LHSKnown.isNegative())
2741       return getFalse(ITy);
2742     if (LHSKnown.isNonNegative() &&
2743         isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2744       return getTrue(ITy);
2745     break;
2746   }
2747   }
2748 
2749   return nullptr;
2750 }
2751 
2752 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2753                                        Value *RHS, const InstrInfoQuery &IIQ) {
2754   Type *ITy = GetCompareTy(RHS); // The return type.
2755 
2756   Value *X;
2757   // Sign-bit checks can be optimized to true/false after unsigned
2758   // floating-point casts:
2759   // icmp slt (bitcast (uitofp X)),  0 --> false
2760   // icmp sgt (bitcast (uitofp X)), -1 --> true
2761   if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
2762     if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
2763       return ConstantInt::getFalse(ITy);
2764     if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
2765       return ConstantInt::getTrue(ITy);
2766   }
2767 
2768   const APInt *C;
2769   if (!match(RHS, m_APIntAllowUndef(C)))
2770     return nullptr;
2771 
2772   // Rule out tautological comparisons (eg., ult 0 or uge 0).
2773   ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2774   if (RHS_CR.isEmptySet())
2775     return ConstantInt::getFalse(ITy);
2776   if (RHS_CR.isFullSet())
2777     return ConstantInt::getTrue(ITy);
2778 
2779   ConstantRange LHS_CR = computeConstantRange(LHS, IIQ.UseInstrInfo);
2780   if (!LHS_CR.isFullSet()) {
2781     if (RHS_CR.contains(LHS_CR))
2782       return ConstantInt::getTrue(ITy);
2783     if (RHS_CR.inverse().contains(LHS_CR))
2784       return ConstantInt::getFalse(ITy);
2785   }
2786 
2787   // (mul nuw/nsw X, MulC) != C --> true  (if C is not a multiple of MulC)
2788   // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
2789   const APInt *MulC;
2790   if (ICmpInst::isEquality(Pred) &&
2791       ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
2792         *MulC != 0 && C->urem(*MulC) != 0) ||
2793        (match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
2794         *MulC != 0 && C->srem(*MulC) != 0)))
2795     return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
2796 
2797   return nullptr;
2798 }
2799 
2800 static Value *simplifyICmpWithBinOpOnLHS(
2801     CmpInst::Predicate Pred, BinaryOperator *LBO, Value *RHS,
2802     const SimplifyQuery &Q, unsigned MaxRecurse) {
2803   Type *ITy = GetCompareTy(RHS); // The return type.
2804 
2805   Value *Y = nullptr;
2806   // icmp pred (or X, Y), X
2807   if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2808     if (Pred == ICmpInst::ICMP_ULT)
2809       return getFalse(ITy);
2810     if (Pred == ICmpInst::ICMP_UGE)
2811       return getTrue(ITy);
2812 
2813     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2814       KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2815       KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2816       if (RHSKnown.isNonNegative() && YKnown.isNegative())
2817         return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2818       if (RHSKnown.isNegative() || YKnown.isNonNegative())
2819         return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2820     }
2821   }
2822 
2823   // icmp pred (and X, Y), X
2824   if (match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
2825     if (Pred == ICmpInst::ICMP_UGT)
2826       return getFalse(ITy);
2827     if (Pred == ICmpInst::ICMP_ULE)
2828       return getTrue(ITy);
2829   }
2830 
2831   // icmp pred (urem X, Y), Y
2832   if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2833     switch (Pred) {
2834     default:
2835       break;
2836     case ICmpInst::ICMP_SGT:
2837     case ICmpInst::ICMP_SGE: {
2838       KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2839       if (!Known.isNonNegative())
2840         break;
2841       LLVM_FALLTHROUGH;
2842     }
2843     case ICmpInst::ICMP_EQ:
2844     case ICmpInst::ICMP_UGT:
2845     case ICmpInst::ICMP_UGE:
2846       return getFalse(ITy);
2847     case ICmpInst::ICMP_SLT:
2848     case ICmpInst::ICMP_SLE: {
2849       KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2850       if (!Known.isNonNegative())
2851         break;
2852       LLVM_FALLTHROUGH;
2853     }
2854     case ICmpInst::ICMP_NE:
2855     case ICmpInst::ICMP_ULT:
2856     case ICmpInst::ICMP_ULE:
2857       return getTrue(ITy);
2858     }
2859   }
2860 
2861   // icmp pred (urem X, Y), X
2862   if (match(LBO, m_URem(m_Specific(RHS), m_Value()))) {
2863     if (Pred == ICmpInst::ICMP_ULE)
2864       return getTrue(ITy);
2865     if (Pred == ICmpInst::ICMP_UGT)
2866       return getFalse(ITy);
2867   }
2868 
2869   // x >> y <=u x
2870   // x udiv y <=u x.
2871   if (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2872       match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
2873     // icmp pred (X op Y), X
2874     if (Pred == ICmpInst::ICMP_UGT)
2875       return getFalse(ITy);
2876     if (Pred == ICmpInst::ICMP_ULE)
2877       return getTrue(ITy);
2878   }
2879 
2880   // (x*C1)/C2 <= x for C1 <= C2.
2881   // This holds even if the multiplication overflows: Assume that x != 0 and
2882   // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
2883   // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
2884   //
2885   // Additionally, either the multiplication and division might be represented
2886   // as shifts:
2887   // (x*C1)>>C2 <= x for C1 < 2**C2.
2888   // (x<<C1)/C2 <= x for 2**C1 < C2.
2889   const APInt *C1, *C2;
2890   if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
2891        C1->ule(*C2)) ||
2892       (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
2893        C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
2894       (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
2895        (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
2896     if (Pred == ICmpInst::ICMP_UGT)
2897       return getFalse(ITy);
2898     if (Pred == ICmpInst::ICMP_ULE)
2899       return getTrue(ITy);
2900   }
2901 
2902   return nullptr;
2903 }
2904 
2905 
2906 // If only one of the icmp's operands has NSW flags, try to prove that:
2907 //
2908 //   icmp slt (x + C1), (x +nsw C2)
2909 //
2910 // is equivalent to:
2911 //
2912 //   icmp slt C1, C2
2913 //
2914 // which is true if x + C2 has the NSW flags set and:
2915 // *) C1 < C2 && C1 >= 0, or
2916 // *) C2 < C1 && C1 <= 0.
2917 //
2918 static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
2919                                     Value *RHS) {
2920   // TODO: only support icmp slt for now.
2921   if (Pred != CmpInst::ICMP_SLT)
2922     return false;
2923 
2924   // Canonicalize nsw add as RHS.
2925   if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
2926     std::swap(LHS, RHS);
2927   if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
2928     return false;
2929 
2930   Value *X;
2931   const APInt *C1, *C2;
2932   if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
2933       !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
2934     return false;
2935 
2936   return (C1->slt(*C2) && C1->isNonNegative()) ||
2937          (C2->slt(*C1) && C1->isNonPositive());
2938 }
2939 
2940 
2941 /// TODO: A large part of this logic is duplicated in InstCombine's
2942 /// foldICmpBinOp(). We should be able to share that and avoid the code
2943 /// duplication.
2944 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2945                                     Value *RHS, const SimplifyQuery &Q,
2946                                     unsigned MaxRecurse) {
2947   BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2948   BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2949   if (MaxRecurse && (LBO || RBO)) {
2950     // Analyze the case when either LHS or RHS is an add instruction.
2951     Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2952     // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
2953     bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
2954     if (LBO && LBO->getOpcode() == Instruction::Add) {
2955       A = LBO->getOperand(0);
2956       B = LBO->getOperand(1);
2957       NoLHSWrapProblem =
2958           ICmpInst::isEquality(Pred) ||
2959           (CmpInst::isUnsigned(Pred) &&
2960            Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
2961           (CmpInst::isSigned(Pred) &&
2962            Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
2963     }
2964     if (RBO && RBO->getOpcode() == Instruction::Add) {
2965       C = RBO->getOperand(0);
2966       D = RBO->getOperand(1);
2967       NoRHSWrapProblem =
2968           ICmpInst::isEquality(Pred) ||
2969           (CmpInst::isUnsigned(Pred) &&
2970            Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
2971           (CmpInst::isSigned(Pred) &&
2972            Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
2973     }
2974 
2975     // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
2976     if ((A == RHS || B == RHS) && NoLHSWrapProblem)
2977       if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
2978                                       Constant::getNullValue(RHS->getType()), Q,
2979                                       MaxRecurse - 1))
2980         return V;
2981 
2982     // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2983     if ((C == LHS || D == LHS) && NoRHSWrapProblem)
2984       if (Value *V =
2985               SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
2986                                C == LHS ? D : C, Q, MaxRecurse - 1))
2987         return V;
2988 
2989     // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
2990     bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
2991                        trySimplifyICmpWithAdds(Pred, LHS, RHS);
2992     if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
2993       // Determine Y and Z in the form icmp (X+Y), (X+Z).
2994       Value *Y, *Z;
2995       if (A == C) {
2996         // C + B == C + D  ->  B == D
2997         Y = B;
2998         Z = D;
2999       } else if (A == D) {
3000         // D + B == C + D  ->  B == C
3001         Y = B;
3002         Z = C;
3003       } else if (B == C) {
3004         // A + C == C + D  ->  A == D
3005         Y = A;
3006         Z = D;
3007       } else {
3008         assert(B == D);
3009         // A + D == C + D  ->  A == C
3010         Y = A;
3011         Z = C;
3012       }
3013       if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3014         return V;
3015     }
3016   }
3017 
3018   if (LBO)
3019     if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3020       return V;
3021 
3022   if (RBO)
3023     if (Value *V = simplifyICmpWithBinOpOnLHS(
3024             ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3025       return V;
3026 
3027   // 0 - (zext X) pred C
3028   if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3029     const APInt *C;
3030     if (match(RHS, m_APInt(C))) {
3031       if (C->isStrictlyPositive()) {
3032         if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3033           return ConstantInt::getTrue(GetCompareTy(RHS));
3034         if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3035           return ConstantInt::getFalse(GetCompareTy(RHS));
3036       }
3037       if (C->isNonNegative()) {
3038         if (Pred == ICmpInst::ICMP_SLE)
3039           return ConstantInt::getTrue(GetCompareTy(RHS));
3040         if (Pred == ICmpInst::ICMP_SGT)
3041           return ConstantInt::getFalse(GetCompareTy(RHS));
3042       }
3043     }
3044   }
3045 
3046   //   If C2 is a power-of-2 and C is not:
3047   //   (C2 << X) == C --> false
3048   //   (C2 << X) != C --> true
3049   const APInt *C;
3050   if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3051       match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
3052     // C2 << X can equal zero in some circumstances.
3053     // This simplification might be unsafe if C is zero.
3054     //
3055     // We know it is safe if:
3056     // - The shift is nsw. We can't shift out the one bit.
3057     // - The shift is nuw. We can't shift out the one bit.
3058     // - C2 is one.
3059     // - C isn't zero.
3060     if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3061         Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3062         match(LHS, m_Shl(m_One(), m_Value())) || !C->isNullValue()) {
3063       if (Pred == ICmpInst::ICMP_EQ)
3064         return ConstantInt::getFalse(GetCompareTy(RHS));
3065       if (Pred == ICmpInst::ICMP_NE)
3066         return ConstantInt::getTrue(GetCompareTy(RHS));
3067     }
3068   }
3069 
3070   // TODO: This is overly constrained. LHS can be any power-of-2.
3071   // (1 << X)  >u 0x8000 --> false
3072   // (1 << X) <=u 0x8000 --> true
3073   if (match(LHS, m_Shl(m_One(), m_Value())) && match(RHS, m_SignMask())) {
3074     if (Pred == ICmpInst::ICMP_UGT)
3075       return ConstantInt::getFalse(GetCompareTy(RHS));
3076     if (Pred == ICmpInst::ICMP_ULE)
3077       return ConstantInt::getTrue(GetCompareTy(RHS));
3078   }
3079 
3080   if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
3081       LBO->getOperand(1) == RBO->getOperand(1)) {
3082     switch (LBO->getOpcode()) {
3083     default:
3084       break;
3085     case Instruction::UDiv:
3086     case Instruction::LShr:
3087       if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3088           !Q.IIQ.isExact(RBO))
3089         break;
3090       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3091                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3092           return V;
3093       break;
3094     case Instruction::SDiv:
3095       if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3096           !Q.IIQ.isExact(RBO))
3097         break;
3098       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3099                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3100         return V;
3101       break;
3102     case Instruction::AShr:
3103       if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3104         break;
3105       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3106                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3107         return V;
3108       break;
3109     case Instruction::Shl: {
3110       bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3111       bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3112       if (!NUW && !NSW)
3113         break;
3114       if (!NSW && ICmpInst::isSigned(Pred))
3115         break;
3116       if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3117                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3118         return V;
3119       break;
3120     }
3121     }
3122   }
3123   return nullptr;
3124 }
3125 
3126 /// Simplify integer comparisons where at least one operand of the compare
3127 /// matches an integer min/max idiom.
3128 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
3129                                      Value *RHS, const SimplifyQuery &Q,
3130                                      unsigned MaxRecurse) {
3131   Type *ITy = GetCompareTy(LHS); // The return type.
3132   Value *A, *B;
3133   CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3134   CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3135 
3136   // Signed variants on "max(a,b)>=a -> true".
3137   if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3138     if (A != RHS)
3139       std::swap(A, B);       // smax(A, B) pred A.
3140     EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3141     // We analyze this as smax(A, B) pred A.
3142     P = Pred;
3143   } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3144              (A == LHS || B == LHS)) {
3145     if (A != LHS)
3146       std::swap(A, B);       // A pred smax(A, B).
3147     EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3148     // We analyze this as smax(A, B) swapped-pred A.
3149     P = CmpInst::getSwappedPredicate(Pred);
3150   } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3151              (A == RHS || B == RHS)) {
3152     if (A != RHS)
3153       std::swap(A, B);       // smin(A, B) pred A.
3154     EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3155     // We analyze this as smax(-A, -B) swapped-pred -A.
3156     // Note that we do not need to actually form -A or -B thanks to EqP.
3157     P = CmpInst::getSwappedPredicate(Pred);
3158   } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3159              (A == LHS || B == LHS)) {
3160     if (A != LHS)
3161       std::swap(A, B);       // A pred smin(A, B).
3162     EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3163     // We analyze this as smax(-A, -B) pred -A.
3164     // Note that we do not need to actually form -A or -B thanks to EqP.
3165     P = Pred;
3166   }
3167   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3168     // Cases correspond to "max(A, B) p A".
3169     switch (P) {
3170     default:
3171       break;
3172     case CmpInst::ICMP_EQ:
3173     case CmpInst::ICMP_SLE:
3174       // Equivalent to "A EqP B".  This may be the same as the condition tested
3175       // in the max/min; if so, we can just return that.
3176       if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3177         return V;
3178       if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3179         return V;
3180       // Otherwise, see if "A EqP B" simplifies.
3181       if (MaxRecurse)
3182         if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3183           return V;
3184       break;
3185     case CmpInst::ICMP_NE:
3186     case CmpInst::ICMP_SGT: {
3187       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3188       // Equivalent to "A InvEqP B".  This may be the same as the condition
3189       // tested in the max/min; if so, we can just return that.
3190       if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3191         return V;
3192       if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3193         return V;
3194       // Otherwise, see if "A InvEqP B" simplifies.
3195       if (MaxRecurse)
3196         if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3197           return V;
3198       break;
3199     }
3200     case CmpInst::ICMP_SGE:
3201       // Always true.
3202       return getTrue(ITy);
3203     case CmpInst::ICMP_SLT:
3204       // Always false.
3205       return getFalse(ITy);
3206     }
3207   }
3208 
3209   // Unsigned variants on "max(a,b)>=a -> true".
3210   P = CmpInst::BAD_ICMP_PREDICATE;
3211   if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3212     if (A != RHS)
3213       std::swap(A, B);       // umax(A, B) pred A.
3214     EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3215     // We analyze this as umax(A, B) pred A.
3216     P = Pred;
3217   } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3218              (A == LHS || B == LHS)) {
3219     if (A != LHS)
3220       std::swap(A, B);       // A pred umax(A, B).
3221     EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3222     // We analyze this as umax(A, B) swapped-pred A.
3223     P = CmpInst::getSwappedPredicate(Pred);
3224   } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3225              (A == RHS || B == RHS)) {
3226     if (A != RHS)
3227       std::swap(A, B);       // umin(A, B) pred A.
3228     EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3229     // We analyze this as umax(-A, -B) swapped-pred -A.
3230     // Note that we do not need to actually form -A or -B thanks to EqP.
3231     P = CmpInst::getSwappedPredicate(Pred);
3232   } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3233              (A == LHS || B == LHS)) {
3234     if (A != LHS)
3235       std::swap(A, B);       // A pred umin(A, B).
3236     EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3237     // We analyze this as umax(-A, -B) pred -A.
3238     // Note that we do not need to actually form -A or -B thanks to EqP.
3239     P = Pred;
3240   }
3241   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3242     // Cases correspond to "max(A, B) p A".
3243     switch (P) {
3244     default:
3245       break;
3246     case CmpInst::ICMP_EQ:
3247     case CmpInst::ICMP_ULE:
3248       // Equivalent to "A EqP B".  This may be the same as the condition tested
3249       // in the max/min; if so, we can just return that.
3250       if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3251         return V;
3252       if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3253         return V;
3254       // Otherwise, see if "A EqP B" simplifies.
3255       if (MaxRecurse)
3256         if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3257           return V;
3258       break;
3259     case CmpInst::ICMP_NE:
3260     case CmpInst::ICMP_UGT: {
3261       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3262       // Equivalent to "A InvEqP B".  This may be the same as the condition
3263       // tested in the max/min; if so, we can just return that.
3264       if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3265         return V;
3266       if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3267         return V;
3268       // Otherwise, see if "A InvEqP B" simplifies.
3269       if (MaxRecurse)
3270         if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3271           return V;
3272       break;
3273     }
3274     case CmpInst::ICMP_UGE:
3275       return getTrue(ITy);
3276     case CmpInst::ICMP_ULT:
3277       return getFalse(ITy);
3278     }
3279   }
3280 
3281   // Comparing 1 each of min/max with a common operand?
3282   // Canonicalize min operand to RHS.
3283   if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3284       match(LHS, m_SMin(m_Value(), m_Value()))) {
3285     std::swap(LHS, RHS);
3286     Pred = ICmpInst::getSwappedPredicate(Pred);
3287   }
3288 
3289   Value *C, *D;
3290   if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3291       match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3292       (A == C || A == D || B == C || B == D)) {
3293     // smax(A, B) >=s smin(A, D) --> true
3294     if (Pred == CmpInst::ICMP_SGE)
3295       return getTrue(ITy);
3296     // smax(A, B) <s smin(A, D) --> false
3297     if (Pred == CmpInst::ICMP_SLT)
3298       return getFalse(ITy);
3299   } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3300              match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3301              (A == C || A == D || B == C || B == D)) {
3302     // umax(A, B) >=u umin(A, D) --> true
3303     if (Pred == CmpInst::ICMP_UGE)
3304       return getTrue(ITy);
3305     // umax(A, B) <u umin(A, D) --> false
3306     if (Pred == CmpInst::ICMP_ULT)
3307       return getFalse(ITy);
3308   }
3309 
3310   return nullptr;
3311 }
3312 
3313 static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate,
3314                                                Value *LHS, Value *RHS,
3315                                                const SimplifyQuery &Q) {
3316   // Gracefully handle instructions that have not been inserted yet.
3317   if (!Q.AC || !Q.CxtI || !Q.CxtI->getParent())
3318     return nullptr;
3319 
3320   for (Value *AssumeBaseOp : {LHS, RHS}) {
3321     for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3322       if (!AssumeVH)
3323         continue;
3324 
3325       CallInst *Assume = cast<CallInst>(AssumeVH);
3326       if (Optional<bool> Imp =
3327               isImpliedCondition(Assume->getArgOperand(0), Predicate, LHS, RHS,
3328                                  Q.DL))
3329         if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3330           return ConstantInt::get(GetCompareTy(LHS), *Imp);
3331     }
3332   }
3333 
3334   return nullptr;
3335 }
3336 
3337 /// Given operands for an ICmpInst, see if we can fold the result.
3338 /// If not, this returns null.
3339 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3340                                const SimplifyQuery &Q, unsigned MaxRecurse) {
3341   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3342   assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3343 
3344   if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3345     if (Constant *CRHS = dyn_cast<Constant>(RHS))
3346       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3347 
3348     // If we have a constant, make sure it is on the RHS.
3349     std::swap(LHS, RHS);
3350     Pred = CmpInst::getSwappedPredicate(Pred);
3351   }
3352   assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3353 
3354   Type *ITy = GetCompareTy(LHS); // The return type.
3355 
3356   // icmp poison, X -> poison
3357   if (isa<PoisonValue>(RHS))
3358     return PoisonValue::get(ITy);
3359 
3360   // For EQ and NE, we can always pick a value for the undef to make the
3361   // predicate pass or fail, so we can return undef.
3362   // Matches behavior in llvm::ConstantFoldCompareInstruction.
3363   if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3364     return UndefValue::get(ITy);
3365 
3366   // icmp X, X -> true/false
3367   // icmp X, undef -> true/false because undef could be X.
3368   if (LHS == RHS || Q.isUndefValue(RHS))
3369     return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3370 
3371   if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3372     return V;
3373 
3374   // TODO: Sink/common this with other potentially expensive calls that use
3375   //       ValueTracking? See comment below for isKnownNonEqual().
3376   if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3377     return V;
3378 
3379   if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3380     return V;
3381 
3382   // If both operands have range metadata, use the metadata
3383   // to simplify the comparison.
3384   if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3385     auto RHS_Instr = cast<Instruction>(RHS);
3386     auto LHS_Instr = cast<Instruction>(LHS);
3387 
3388     if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
3389         Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
3390       auto RHS_CR = getConstantRangeFromMetadata(
3391           *RHS_Instr->getMetadata(LLVMContext::MD_range));
3392       auto LHS_CR = getConstantRangeFromMetadata(
3393           *LHS_Instr->getMetadata(LLVMContext::MD_range));
3394 
3395       if (LHS_CR.icmp(Pred, RHS_CR))
3396         return ConstantInt::getTrue(RHS->getContext());
3397 
3398       if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
3399         return ConstantInt::getFalse(RHS->getContext());
3400     }
3401   }
3402 
3403   // Compare of cast, for example (zext X) != 0 -> X != 0
3404   if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3405     Instruction *LI = cast<CastInst>(LHS);
3406     Value *SrcOp = LI->getOperand(0);
3407     Type *SrcTy = SrcOp->getType();
3408     Type *DstTy = LI->getType();
3409 
3410     // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3411     // if the integer type is the same size as the pointer type.
3412     if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3413         Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3414       if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3415         // Transfer the cast to the constant.
3416         if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3417                                         ConstantExpr::getIntToPtr(RHSC, SrcTy),
3418                                         Q, MaxRecurse-1))
3419           return V;
3420       } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3421         if (RI->getOperand(0)->getType() == SrcTy)
3422           // Compare without the cast.
3423           if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3424                                           Q, MaxRecurse-1))
3425             return V;
3426       }
3427     }
3428 
3429     if (isa<ZExtInst>(LHS)) {
3430       // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3431       // same type.
3432       if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3433         if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3434           // Compare X and Y.  Note that signed predicates become unsigned.
3435           if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3436                                           SrcOp, RI->getOperand(0), Q,
3437                                           MaxRecurse-1))
3438             return V;
3439       }
3440       // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3441       else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3442         if (SrcOp == RI->getOperand(0)) {
3443           if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3444             return ConstantInt::getTrue(ITy);
3445           if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3446             return ConstantInt::getFalse(ITy);
3447         }
3448       }
3449       // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3450       // too.  If not, then try to deduce the result of the comparison.
3451       else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3452         // Compute the constant that would happen if we truncated to SrcTy then
3453         // reextended to DstTy.
3454         Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3455         Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3456 
3457         // If the re-extended constant didn't change then this is effectively
3458         // also a case of comparing two zero-extended values.
3459         if (RExt == CI && MaxRecurse)
3460           if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3461                                         SrcOp, Trunc, Q, MaxRecurse-1))
3462             return V;
3463 
3464         // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3465         // there.  Use this to work out the result of the comparison.
3466         if (RExt != CI) {
3467           switch (Pred) {
3468           default: llvm_unreachable("Unknown ICmp predicate!");
3469           // LHS <u RHS.
3470           case ICmpInst::ICMP_EQ:
3471           case ICmpInst::ICMP_UGT:
3472           case ICmpInst::ICMP_UGE:
3473             return ConstantInt::getFalse(CI->getContext());
3474 
3475           case ICmpInst::ICMP_NE:
3476           case ICmpInst::ICMP_ULT:
3477           case ICmpInst::ICMP_ULE:
3478             return ConstantInt::getTrue(CI->getContext());
3479 
3480           // LHS is non-negative.  If RHS is negative then LHS >s LHS.  If RHS
3481           // is non-negative then LHS <s RHS.
3482           case ICmpInst::ICMP_SGT:
3483           case ICmpInst::ICMP_SGE:
3484             return CI->getValue().isNegative() ?
3485               ConstantInt::getTrue(CI->getContext()) :
3486               ConstantInt::getFalse(CI->getContext());
3487 
3488           case ICmpInst::ICMP_SLT:
3489           case ICmpInst::ICMP_SLE:
3490             return CI->getValue().isNegative() ?
3491               ConstantInt::getFalse(CI->getContext()) :
3492               ConstantInt::getTrue(CI->getContext());
3493           }
3494         }
3495       }
3496     }
3497 
3498     if (isa<SExtInst>(LHS)) {
3499       // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3500       // same type.
3501       if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3502         if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3503           // Compare X and Y.  Note that the predicate does not change.
3504           if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3505                                           Q, MaxRecurse-1))
3506             return V;
3507       }
3508       // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3509       else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3510         if (SrcOp == RI->getOperand(0)) {
3511           if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3512             return ConstantInt::getTrue(ITy);
3513           if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3514             return ConstantInt::getFalse(ITy);
3515         }
3516       }
3517       // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3518       // too.  If not, then try to deduce the result of the comparison.
3519       else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3520         // Compute the constant that would happen if we truncated to SrcTy then
3521         // reextended to DstTy.
3522         Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3523         Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3524 
3525         // If the re-extended constant didn't change then this is effectively
3526         // also a case of comparing two sign-extended values.
3527         if (RExt == CI && MaxRecurse)
3528           if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3529             return V;
3530 
3531         // Otherwise the upper bits of LHS are all equal, while RHS has varying
3532         // bits there.  Use this to work out the result of the comparison.
3533         if (RExt != CI) {
3534           switch (Pred) {
3535           default: llvm_unreachable("Unknown ICmp predicate!");
3536           case ICmpInst::ICMP_EQ:
3537             return ConstantInt::getFalse(CI->getContext());
3538           case ICmpInst::ICMP_NE:
3539             return ConstantInt::getTrue(CI->getContext());
3540 
3541           // If RHS is non-negative then LHS <s RHS.  If RHS is negative then
3542           // LHS >s RHS.
3543           case ICmpInst::ICMP_SGT:
3544           case ICmpInst::ICMP_SGE:
3545             return CI->getValue().isNegative() ?
3546               ConstantInt::getTrue(CI->getContext()) :
3547               ConstantInt::getFalse(CI->getContext());
3548           case ICmpInst::ICMP_SLT:
3549           case ICmpInst::ICMP_SLE:
3550             return CI->getValue().isNegative() ?
3551               ConstantInt::getFalse(CI->getContext()) :
3552               ConstantInt::getTrue(CI->getContext());
3553 
3554           // If LHS is non-negative then LHS <u RHS.  If LHS is negative then
3555           // LHS >u RHS.
3556           case ICmpInst::ICMP_UGT:
3557           case ICmpInst::ICMP_UGE:
3558             // Comparison is true iff the LHS <s 0.
3559             if (MaxRecurse)
3560               if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3561                                               Constant::getNullValue(SrcTy),
3562                                               Q, MaxRecurse-1))
3563                 return V;
3564             break;
3565           case ICmpInst::ICMP_ULT:
3566           case ICmpInst::ICMP_ULE:
3567             // Comparison is true iff the LHS >=s 0.
3568             if (MaxRecurse)
3569               if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3570                                               Constant::getNullValue(SrcTy),
3571                                               Q, MaxRecurse-1))
3572                 return V;
3573             break;
3574           }
3575         }
3576       }
3577     }
3578   }
3579 
3580   // icmp eq|ne X, Y -> false|true if X != Y
3581   // This is potentially expensive, and we have already computedKnownBits for
3582   // compares with 0 above here, so only try this for a non-zero compare.
3583   if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
3584       isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
3585     return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3586   }
3587 
3588   if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3589     return V;
3590 
3591   if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3592     return V;
3593 
3594   if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
3595     return V;
3596 
3597   // Simplify comparisons of related pointers using a powerful, recursive
3598   // GEP-walk when we have target data available..
3599   if (LHS->getType()->isPointerTy())
3600     if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
3601       return C;
3602   if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3603     if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3604       if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3605               Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3606           Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3607               Q.DL.getTypeSizeInBits(CRHS->getType()))
3608         if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
3609                                          CRHS->getPointerOperand(), Q))
3610           return C;
3611 
3612   if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3613     if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3614       if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3615           GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3616           (ICmpInst::isEquality(Pred) ||
3617            (GLHS->isInBounds() && GRHS->isInBounds() &&
3618             Pred == ICmpInst::getSignedPredicate(Pred)))) {
3619         // The bases are equal and the indices are constant.  Build a constant
3620         // expression GEP with the same indices and a null base pointer to see
3621         // what constant folding can make out of it.
3622         Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
3623         SmallVector<Value *, 4> IndicesLHS(GLHS->indices());
3624         Constant *NewLHS = ConstantExpr::getGetElementPtr(
3625             GLHS->getSourceElementType(), Null, IndicesLHS);
3626 
3627         SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
3628         Constant *NewRHS = ConstantExpr::getGetElementPtr(
3629             GLHS->getSourceElementType(), Null, IndicesRHS);
3630         Constant *NewICmp = ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
3631         return ConstantFoldConstant(NewICmp, Q.DL);
3632       }
3633     }
3634   }
3635 
3636   // If the comparison is with the result of a select instruction, check whether
3637   // comparing with either branch of the select always yields the same value.
3638   if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3639     if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3640       return V;
3641 
3642   // If the comparison is with the result of a phi instruction, check whether
3643   // doing the compare with each incoming phi value yields a common result.
3644   if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3645     if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3646       return V;
3647 
3648   return nullptr;
3649 }
3650 
3651 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3652                               const SimplifyQuery &Q) {
3653   return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
3654 }
3655 
3656 /// Given operands for an FCmpInst, see if we can fold the result.
3657 /// If not, this returns null.
3658 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3659                                FastMathFlags FMF, const SimplifyQuery &Q,
3660                                unsigned MaxRecurse) {
3661   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3662   assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3663 
3664   if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3665     if (Constant *CRHS = dyn_cast<Constant>(RHS))
3666       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3667 
3668     // If we have a constant, make sure it is on the RHS.
3669     std::swap(LHS, RHS);
3670     Pred = CmpInst::getSwappedPredicate(Pred);
3671   }
3672 
3673   // Fold trivial predicates.
3674   Type *RetTy = GetCompareTy(LHS);
3675   if (Pred == FCmpInst::FCMP_FALSE)
3676     return getFalse(RetTy);
3677   if (Pred == FCmpInst::FCMP_TRUE)
3678     return getTrue(RetTy);
3679 
3680   // Fold (un)ordered comparison if we can determine there are no NaNs.
3681   if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD)
3682     if (FMF.noNaNs() ||
3683         (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI)))
3684       return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
3685 
3686   // NaN is unordered; NaN is not ordered.
3687   assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
3688          "Comparison must be either ordered or unordered");
3689   if (match(RHS, m_NaN()))
3690     return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3691 
3692   // fcmp pred x, undef  and  fcmp pred undef, x
3693   // fold to true if unordered, false if ordered
3694   if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
3695     // Choosing NaN for the undef will always make unordered comparison succeed
3696     // and ordered comparison fail.
3697     return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3698   }
3699 
3700   // fcmp x,x -> true/false.  Not all compares are foldable.
3701   if (LHS == RHS) {
3702     if (CmpInst::isTrueWhenEqual(Pred))
3703       return getTrue(RetTy);
3704     if (CmpInst::isFalseWhenEqual(Pred))
3705       return getFalse(RetTy);
3706   }
3707 
3708   // Handle fcmp with constant RHS.
3709   // TODO: Use match with a specific FP value, so these work with vectors with
3710   // undef lanes.
3711   const APFloat *C;
3712   if (match(RHS, m_APFloat(C))) {
3713     // Check whether the constant is an infinity.
3714     if (C->isInfinity()) {
3715       if (C->isNegative()) {
3716         switch (Pred) {
3717         case FCmpInst::FCMP_OLT:
3718           // No value is ordered and less than negative infinity.
3719           return getFalse(RetTy);
3720         case FCmpInst::FCMP_UGE:
3721           // All values are unordered with or at least negative infinity.
3722           return getTrue(RetTy);
3723         default:
3724           break;
3725         }
3726       } else {
3727         switch (Pred) {
3728         case FCmpInst::FCMP_OGT:
3729           // No value is ordered and greater than infinity.
3730           return getFalse(RetTy);
3731         case FCmpInst::FCMP_ULE:
3732           // All values are unordered with and at most infinity.
3733           return getTrue(RetTy);
3734         default:
3735           break;
3736         }
3737       }
3738 
3739       // LHS == Inf
3740       if (Pred == FCmpInst::FCMP_OEQ && isKnownNeverInfinity(LHS, Q.TLI))
3741         return getFalse(RetTy);
3742       // LHS != Inf
3743       if (Pred == FCmpInst::FCMP_UNE && isKnownNeverInfinity(LHS, Q.TLI))
3744         return getTrue(RetTy);
3745       // LHS == Inf || LHS == NaN
3746       if (Pred == FCmpInst::FCMP_UEQ && isKnownNeverInfinity(LHS, Q.TLI) &&
3747           isKnownNeverNaN(LHS, Q.TLI))
3748         return getFalse(RetTy);
3749       // LHS != Inf && LHS != NaN
3750       if (Pred == FCmpInst::FCMP_ONE && isKnownNeverInfinity(LHS, Q.TLI) &&
3751           isKnownNeverNaN(LHS, Q.TLI))
3752         return getTrue(RetTy);
3753     }
3754     if (C->isNegative() && !C->isNegZero()) {
3755       assert(!C->isNaN() && "Unexpected NaN constant!");
3756       // TODO: We can catch more cases by using a range check rather than
3757       //       relying on CannotBeOrderedLessThanZero.
3758       switch (Pred) {
3759       case FCmpInst::FCMP_UGE:
3760       case FCmpInst::FCMP_UGT:
3761       case FCmpInst::FCMP_UNE:
3762         // (X >= 0) implies (X > C) when (C < 0)
3763         if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3764           return getTrue(RetTy);
3765         break;
3766       case FCmpInst::FCMP_OEQ:
3767       case FCmpInst::FCMP_OLE:
3768       case FCmpInst::FCMP_OLT:
3769         // (X >= 0) implies !(X < C) when (C < 0)
3770         if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3771           return getFalse(RetTy);
3772         break;
3773       default:
3774         break;
3775       }
3776     }
3777 
3778     // Check comparison of [minnum/maxnum with constant] with other constant.
3779     const APFloat *C2;
3780     if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
3781          *C2 < *C) ||
3782         (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
3783          *C2 > *C)) {
3784       bool IsMaxNum =
3785           cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
3786       // The ordered relationship and minnum/maxnum guarantee that we do not
3787       // have NaN constants, so ordered/unordered preds are handled the same.
3788       switch (Pred) {
3789       case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_UEQ:
3790         // minnum(X, LesserC)  == C --> false
3791         // maxnum(X, GreaterC) == C --> false
3792         return getFalse(RetTy);
3793       case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_UNE:
3794         // minnum(X, LesserC)  != C --> true
3795         // maxnum(X, GreaterC) != C --> true
3796         return getTrue(RetTy);
3797       case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_UGE:
3798       case FCmpInst::FCMP_OGT: case FCmpInst::FCMP_UGT:
3799         // minnum(X, LesserC)  >= C --> false
3800         // minnum(X, LesserC)  >  C --> false
3801         // maxnum(X, GreaterC) >= C --> true
3802         // maxnum(X, GreaterC) >  C --> true
3803         return ConstantInt::get(RetTy, IsMaxNum);
3804       case FCmpInst::FCMP_OLE: case FCmpInst::FCMP_ULE:
3805       case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_ULT:
3806         // minnum(X, LesserC)  <= C --> true
3807         // minnum(X, LesserC)  <  C --> true
3808         // maxnum(X, GreaterC) <= C --> false
3809         // maxnum(X, GreaterC) <  C --> false
3810         return ConstantInt::get(RetTy, !IsMaxNum);
3811       default:
3812         // TRUE/FALSE/ORD/UNO should be handled before this.
3813         llvm_unreachable("Unexpected fcmp predicate");
3814       }
3815     }
3816   }
3817 
3818   if (match(RHS, m_AnyZeroFP())) {
3819     switch (Pred) {
3820     case FCmpInst::FCMP_OGE:
3821     case FCmpInst::FCMP_ULT:
3822       // Positive or zero X >= 0.0 --> true
3823       // Positive or zero X <  0.0 --> false
3824       if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) &&
3825           CannotBeOrderedLessThanZero(LHS, Q.TLI))
3826         return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
3827       break;
3828     case FCmpInst::FCMP_UGE:
3829     case FCmpInst::FCMP_OLT:
3830       // Positive or zero or nan X >= 0.0 --> true
3831       // Positive or zero or nan X <  0.0 --> false
3832       if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3833         return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
3834       break;
3835     default:
3836       break;
3837     }
3838   }
3839 
3840   // If the comparison is with the result of a select instruction, check whether
3841   // comparing with either branch of the select always yields the same value.
3842   if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3843     if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3844       return V;
3845 
3846   // If the comparison is with the result of a phi instruction, check whether
3847   // doing the compare with each incoming phi value yields a common result.
3848   if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3849     if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3850       return V;
3851 
3852   return nullptr;
3853 }
3854 
3855 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3856                               FastMathFlags FMF, const SimplifyQuery &Q) {
3857   return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
3858 }
3859 
3860 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3861                                      const SimplifyQuery &Q,
3862                                      bool AllowRefinement,
3863                                      unsigned MaxRecurse) {
3864   assert(!Op->getType()->isVectorTy() && "This is not safe for vectors");
3865 
3866   // Trivial replacement.
3867   if (V == Op)
3868     return RepOp;
3869 
3870   // We cannot replace a constant, and shouldn't even try.
3871   if (isa<Constant>(Op))
3872     return nullptr;
3873 
3874   auto *I = dyn_cast<Instruction>(V);
3875   if (!I || !is_contained(I->operands(), Op))
3876     return nullptr;
3877 
3878   // Replace Op with RepOp in instruction operands.
3879   SmallVector<Value *, 8> NewOps(I->getNumOperands());
3880   transform(I->operands(), NewOps.begin(),
3881             [&](Value *V) { return V == Op ? RepOp : V; });
3882 
3883   if (!AllowRefinement) {
3884     // General InstSimplify functions may refine the result, e.g. by returning
3885     // a constant for a potentially poison value. To avoid this, implement only
3886     // a few non-refining but profitable transforms here.
3887 
3888     if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3889       unsigned Opcode = BO->getOpcode();
3890       // id op x -> x, x op id -> x
3891       if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
3892         return NewOps[1];
3893       if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
3894                                                       /* RHS */ true))
3895         return NewOps[0];
3896 
3897       // x & x -> x, x | x -> x
3898       if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
3899           NewOps[0] == NewOps[1])
3900         return NewOps[0];
3901     }
3902 
3903     if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3904       // getelementptr x, 0 -> x
3905       if (NewOps.size() == 2 && match(NewOps[1], m_Zero()) &&
3906           !GEP->isInBounds())
3907         return NewOps[0];
3908     }
3909   } else if (MaxRecurse) {
3910     // The simplification queries below may return the original value. Consider:
3911     //   %div = udiv i32 %arg, %arg2
3912     //   %mul = mul nsw i32 %div, %arg2
3913     //   %cmp = icmp eq i32 %mul, %arg
3914     //   %sel = select i1 %cmp, i32 %div, i32 undef
3915     // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
3916     // simplifies back to %arg. This can only happen because %mul does not
3917     // dominate %div. To ensure a consistent return value contract, we make sure
3918     // that this case returns nullptr as well.
3919     auto PreventSelfSimplify = [V](Value *Simplified) {
3920       return Simplified != V ? Simplified : nullptr;
3921     };
3922 
3923     if (auto *B = dyn_cast<BinaryOperator>(I))
3924       return PreventSelfSimplify(SimplifyBinOp(B->getOpcode(), NewOps[0],
3925                                                NewOps[1], Q, MaxRecurse - 1));
3926 
3927     if (CmpInst *C = dyn_cast<CmpInst>(I))
3928       return PreventSelfSimplify(SimplifyCmpInst(C->getPredicate(), NewOps[0],
3929                                                  NewOps[1], Q, MaxRecurse - 1));
3930 
3931     if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
3932       return PreventSelfSimplify(SimplifyGEPInst(GEP->getSourceElementType(),
3933                                                  NewOps, Q, MaxRecurse - 1));
3934 
3935     if (isa<SelectInst>(I))
3936       return PreventSelfSimplify(
3937           SimplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q,
3938                              MaxRecurse - 1));
3939     // TODO: We could hand off more cases to instsimplify here.
3940   }
3941 
3942   // If all operands are constant after substituting Op for RepOp then we can
3943   // constant fold the instruction.
3944   SmallVector<Constant *, 8> ConstOps;
3945   for (Value *NewOp : NewOps) {
3946     if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
3947       ConstOps.push_back(ConstOp);
3948     else
3949       return nullptr;
3950   }
3951 
3952   // Consider:
3953   //   %cmp = icmp eq i32 %x, 2147483647
3954   //   %add = add nsw i32 %x, 1
3955   //   %sel = select i1 %cmp, i32 -2147483648, i32 %add
3956   //
3957   // We can't replace %sel with %add unless we strip away the flags (which
3958   // will be done in InstCombine).
3959   // TODO: This may be unsound, because it only catches some forms of
3960   // refinement.
3961   if (!AllowRefinement && canCreatePoison(cast<Operator>(I)))
3962     return nullptr;
3963 
3964   if (CmpInst *C = dyn_cast<CmpInst>(I))
3965     return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
3966                                            ConstOps[1], Q.DL, Q.TLI);
3967 
3968   if (LoadInst *LI = dyn_cast<LoadInst>(I))
3969     if (!LI->isVolatile())
3970       return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
3971 
3972   return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
3973 }
3974 
3975 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3976                                     const SimplifyQuery &Q,
3977                                     bool AllowRefinement) {
3978   return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement,
3979                                   RecursionLimit);
3980 }
3981 
3982 /// Try to simplify a select instruction when its condition operand is an
3983 /// integer comparison where one operand of the compare is a constant.
3984 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
3985                                     const APInt *Y, bool TrueWhenUnset) {
3986   const APInt *C;
3987 
3988   // (X & Y) == 0 ? X & ~Y : X  --> X
3989   // (X & Y) != 0 ? X & ~Y : X  --> X & ~Y
3990   if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
3991       *Y == ~*C)
3992     return TrueWhenUnset ? FalseVal : TrueVal;
3993 
3994   // (X & Y) == 0 ? X : X & ~Y  --> X & ~Y
3995   // (X & Y) != 0 ? X : X & ~Y  --> X
3996   if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
3997       *Y == ~*C)
3998     return TrueWhenUnset ? FalseVal : TrueVal;
3999 
4000   if (Y->isPowerOf2()) {
4001     // (X & Y) == 0 ? X | Y : X  --> X | Y
4002     // (X & Y) != 0 ? X | Y : X  --> X
4003     if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4004         *Y == *C)
4005       return TrueWhenUnset ? TrueVal : FalseVal;
4006 
4007     // (X & Y) == 0 ? X : X | Y  --> X
4008     // (X & Y) != 0 ? X : X | Y  --> X | Y
4009     if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4010         *Y == *C)
4011       return TrueWhenUnset ? TrueVal : FalseVal;
4012   }
4013 
4014   return nullptr;
4015 }
4016 
4017 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
4018 /// eq/ne.
4019 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
4020                                            ICmpInst::Predicate Pred,
4021                                            Value *TrueVal, Value *FalseVal) {
4022   Value *X;
4023   APInt Mask;
4024   if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
4025     return nullptr;
4026 
4027   return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
4028                                Pred == ICmpInst::ICMP_EQ);
4029 }
4030 
4031 /// Try to simplify a select instruction when its condition operand is an
4032 /// integer comparison.
4033 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4034                                          Value *FalseVal, const SimplifyQuery &Q,
4035                                          unsigned MaxRecurse) {
4036   ICmpInst::Predicate Pred;
4037   Value *CmpLHS, *CmpRHS;
4038   if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4039     return nullptr;
4040 
4041   // Canonicalize ne to eq predicate.
4042   if (Pred == ICmpInst::ICMP_NE) {
4043     Pred = ICmpInst::ICMP_EQ;
4044     std::swap(TrueVal, FalseVal);
4045   }
4046 
4047   if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4048     Value *X;
4049     const APInt *Y;
4050     if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4051       if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4052                                            /*TrueWhenUnset=*/true))
4053         return V;
4054 
4055     // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4056     Value *ShAmt;
4057     auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4058                              m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4059     // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4060     // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4061     if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4062       return X;
4063 
4064     // Test for a zero-shift-guard-op around rotates. These are used to
4065     // avoid UB from oversized shifts in raw IR rotate patterns, but the
4066     // intrinsics do not have that problem.
4067     // We do not allow this transform for the general funnel shift case because
4068     // that would not preserve the poison safety of the original code.
4069     auto isRotate =
4070         m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4071                     m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4072     // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4073     // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4074     if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4075         Pred == ICmpInst::ICMP_EQ)
4076       return FalseVal;
4077 
4078     // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4079     // X == 0 ? -abs(X) : abs(X) --> abs(X)
4080     if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4081         match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4082       return FalseVal;
4083     if (match(TrueVal,
4084               m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4085         match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4086       return FalseVal;
4087   }
4088 
4089   // Check for other compares that behave like bit test.
4090   if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred,
4091                                               TrueVal, FalseVal))
4092     return V;
4093 
4094   // If we have a scalar equality comparison, then we know the value in one of
4095   // the arms of the select. See if substituting this value into the arm and
4096   // simplifying the result yields the same value as the other arm.
4097   // Note that the equivalence/replacement opportunity does not hold for vectors
4098   // because each element of a vector select is chosen independently.
4099   if (Pred == ICmpInst::ICMP_EQ && !CondVal->getType()->isVectorTy()) {
4100     if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
4101                                /* AllowRefinement */ false, MaxRecurse) ==
4102             TrueVal ||
4103         simplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q,
4104                                /* AllowRefinement */ false, MaxRecurse) ==
4105             TrueVal)
4106       return FalseVal;
4107     if (simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4108                                /* AllowRefinement */ true, MaxRecurse) ==
4109             FalseVal ||
4110         simplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q,
4111                                /* AllowRefinement */ true, MaxRecurse) ==
4112             FalseVal)
4113       return FalseVal;
4114   }
4115 
4116   return nullptr;
4117 }
4118 
4119 /// Try to simplify a select instruction when its condition operand is a
4120 /// floating-point comparison.
4121 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4122                                      const SimplifyQuery &Q) {
4123   FCmpInst::Predicate Pred;
4124   if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
4125       !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
4126     return nullptr;
4127 
4128   // This transform is safe if we do not have (do not care about) -0.0 or if
4129   // at least one operand is known to not be -0.0. Otherwise, the select can
4130   // change the sign of a zero operand.
4131   bool HasNoSignedZeros = Q.CxtI && isa<FPMathOperator>(Q.CxtI) &&
4132                           Q.CxtI->hasNoSignedZeros();
4133   const APFloat *C;
4134   if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) ||
4135                           (match(F, m_APFloat(C)) && C->isNonZero())) {
4136     // (T == F) ? T : F --> F
4137     // (F == T) ? T : F --> F
4138     if (Pred == FCmpInst::FCMP_OEQ)
4139       return F;
4140 
4141     // (T != F) ? T : F --> T
4142     // (F != T) ? T : F --> T
4143     if (Pred == FCmpInst::FCMP_UNE)
4144       return T;
4145   }
4146 
4147   return nullptr;
4148 }
4149 
4150 /// Given operands for a SelectInst, see if we can fold the result.
4151 /// If not, this returns null.
4152 static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4153                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
4154   if (auto *CondC = dyn_cast<Constant>(Cond)) {
4155     if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4156       if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4157         return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);
4158 
4159     // select undef, X, Y -> X or Y
4160     if (Q.isUndefValue(CondC))
4161       return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4162 
4163     // select true,  X, Y --> X
4164     // select false, X, Y --> Y
4165     // For vectors, allow undef/poison elements in the condition to match the
4166     // defined elements, so we can eliminate the select.
4167     if (match(CondC, m_One()))
4168       return TrueVal;
4169     if (match(CondC, m_Zero()))
4170       return FalseVal;
4171   }
4172 
4173   // select i1 Cond, i1 true, i1 false --> i1 Cond
4174   assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4175          "Select must have bool or bool vector condition");
4176   assert(TrueVal->getType() == FalseVal->getType() &&
4177          "Select must have same types for true/false ops");
4178   if (Cond->getType() == TrueVal->getType() &&
4179       match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4180     return Cond;
4181 
4182   // select ?, X, X -> X
4183   if (TrueVal == FalseVal)
4184     return TrueVal;
4185 
4186   // If the true or false value is undef, we can fold to the other value as
4187   // long as the other value isn't poison.
4188   // select ?, undef, X -> X
4189   if (Q.isUndefValue(TrueVal) &&
4190       isGuaranteedNotToBeUndefOrPoison(FalseVal, Q.AC, Q.CxtI, Q.DT))
4191     return FalseVal;
4192   // select ?, X, undef -> X
4193   if (Q.isUndefValue(FalseVal) &&
4194       isGuaranteedNotToBeUndefOrPoison(TrueVal, Q.AC, Q.CxtI, Q.DT))
4195     return TrueVal;
4196 
4197   // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
4198   Constant *TrueC, *FalseC;
4199   if (isa<FixedVectorType>(TrueVal->getType()) &&
4200       match(TrueVal, m_Constant(TrueC)) &&
4201       match(FalseVal, m_Constant(FalseC))) {
4202     unsigned NumElts =
4203         cast<FixedVectorType>(TrueC->getType())->getNumElements();
4204     SmallVector<Constant *, 16> NewC;
4205     for (unsigned i = 0; i != NumElts; ++i) {
4206       // Bail out on incomplete vector constants.
4207       Constant *TEltC = TrueC->getAggregateElement(i);
4208       Constant *FEltC = FalseC->getAggregateElement(i);
4209       if (!TEltC || !FEltC)
4210         break;
4211 
4212       // If the elements match (undef or not), that value is the result. If only
4213       // one element is undef, choose the defined element as the safe result.
4214       if (TEltC == FEltC)
4215         NewC.push_back(TEltC);
4216       else if (Q.isUndefValue(TEltC) &&
4217                isGuaranteedNotToBeUndefOrPoison(FEltC))
4218         NewC.push_back(FEltC);
4219       else if (Q.isUndefValue(FEltC) &&
4220                isGuaranteedNotToBeUndefOrPoison(TEltC))
4221         NewC.push_back(TEltC);
4222       else
4223         break;
4224     }
4225     if (NewC.size() == NumElts)
4226       return ConstantVector::get(NewC);
4227   }
4228 
4229   if (Value *V =
4230           simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4231     return V;
4232 
4233   if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
4234     return V;
4235 
4236   if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
4237     return V;
4238 
4239   Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4240   if (Imp)
4241     return *Imp ? TrueVal : FalseVal;
4242 
4243   return nullptr;
4244 }
4245 
4246 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4247                                 const SimplifyQuery &Q) {
4248   return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
4249 }
4250 
4251 /// Given operands for an GetElementPtrInst, see if we can fold the result.
4252 /// If not, this returns null.
4253 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
4254                               const SimplifyQuery &Q, unsigned) {
4255   // The type of the GEP pointer operand.
4256   unsigned AS =
4257       cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
4258 
4259   // getelementptr P -> P.
4260   if (Ops.size() == 1)
4261     return Ops[0];
4262 
4263   // Compute the (pointer) type returned by the GEP instruction.
4264   Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
4265   Type *GEPTy = PointerType::get(LastType, AS);
4266   for (Value *Op : Ops) {
4267     // If one of the operands is a vector, the result type is a vector of
4268     // pointers. All vector operands must have the same number of elements.
4269     if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
4270       GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4271       break;
4272     }
4273   }
4274 
4275   // getelementptr poison, idx -> poison
4276   // getelementptr baseptr, poison -> poison
4277   if (any_of(Ops, [](const auto *V) { return isa<PoisonValue>(V); }))
4278     return PoisonValue::get(GEPTy);
4279 
4280   if (Q.isUndefValue(Ops[0]))
4281     return UndefValue::get(GEPTy);
4282 
4283   bool IsScalableVec =
4284       isa<ScalableVectorType>(SrcTy) || any_of(Ops, [](const Value *V) {
4285         return isa<ScalableVectorType>(V->getType());
4286       });
4287 
4288   if (Ops.size() == 2) {
4289     // getelementptr P, 0 -> P.
4290     if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy)
4291       return Ops[0];
4292 
4293     Type *Ty = SrcTy;
4294     if (!IsScalableVec && Ty->isSized()) {
4295       Value *P;
4296       uint64_t C;
4297       uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
4298       // getelementptr P, N -> P if P points to a type of zero size.
4299       if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy)
4300         return Ops[0];
4301 
4302       // The following transforms are only safe if the ptrtoint cast
4303       // doesn't truncate the pointers.
4304       if (Ops[1]->getType()->getScalarSizeInBits() ==
4305           Q.DL.getPointerSizeInBits(AS)) {
4306         auto CanSimplify = [GEPTy, &P, V = Ops[0]]() -> bool {
4307           return P->getType() == GEPTy &&
4308                  getUnderlyingObject(P) == getUnderlyingObject(V);
4309         };
4310         // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
4311         if (TyAllocSize == 1 &&
4312             match(Ops[1], m_Sub(m_PtrToInt(m_Value(P)),
4313                                 m_PtrToInt(m_Specific(Ops[0])))) &&
4314             CanSimplify())
4315           return P;
4316 
4317         // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
4318         // size 1 << C.
4319         if (match(Ops[1], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
4320                                        m_PtrToInt(m_Specific(Ops[0]))),
4321                                  m_ConstantInt(C))) &&
4322             TyAllocSize == 1ULL << C && CanSimplify())
4323           return P;
4324 
4325         // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
4326         // size C.
4327         if (match(Ops[1], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
4328                                        m_PtrToInt(m_Specific(Ops[0]))),
4329                                  m_SpecificInt(TyAllocSize))) &&
4330             CanSimplify())
4331           return P;
4332       }
4333     }
4334   }
4335 
4336   if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
4337       all_of(Ops.slice(1).drop_back(1),
4338              [](Value *Idx) { return match(Idx, m_Zero()); })) {
4339     unsigned IdxWidth =
4340         Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
4341     if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) {
4342       APInt BasePtrOffset(IdxWidth, 0);
4343       Value *StrippedBasePtr =
4344           Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
4345                                                             BasePtrOffset);
4346 
4347       // Avoid creating inttoptr of zero here: While LLVMs treatment of
4348       // inttoptr is generally conservative, this particular case is folded to
4349       // a null pointer, which will have incorrect provenance.
4350 
4351       // gep (gep V, C), (sub 0, V) -> C
4352       if (match(Ops.back(),
4353                 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
4354           !BasePtrOffset.isNullValue()) {
4355         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
4356         return ConstantExpr::getIntToPtr(CI, GEPTy);
4357       }
4358       // gep (gep V, C), (xor V, -1) -> C-1
4359       if (match(Ops.back(),
4360                 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
4361           !BasePtrOffset.isOneValue()) {
4362         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
4363         return ConstantExpr::getIntToPtr(CI, GEPTy);
4364       }
4365     }
4366   }
4367 
4368   // Check to see if this is constant foldable.
4369   if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); }))
4370     return nullptr;
4371 
4372   auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
4373                                             Ops.slice(1));
4374   return ConstantFoldConstant(CE, Q.DL);
4375 }
4376 
4377 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
4378                              const SimplifyQuery &Q) {
4379   return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit);
4380 }
4381 
4382 /// Given operands for an InsertValueInst, see if we can fold the result.
4383 /// If not, this returns null.
4384 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
4385                                       ArrayRef<unsigned> Idxs, const SimplifyQuery &Q,
4386                                       unsigned) {
4387   if (Constant *CAgg = dyn_cast<Constant>(Agg))
4388     if (Constant *CVal = dyn_cast<Constant>(Val))
4389       return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
4390 
4391   // insertvalue x, undef, n -> x
4392   if (Q.isUndefValue(Val))
4393     return Agg;
4394 
4395   // insertvalue x, (extractvalue y, n), n
4396   if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
4397     if (EV->getAggregateOperand()->getType() == Agg->getType() &&
4398         EV->getIndices() == Idxs) {
4399       // insertvalue undef, (extractvalue y, n), n -> y
4400       if (Q.isUndefValue(Agg))
4401         return EV->getAggregateOperand();
4402 
4403       // insertvalue y, (extractvalue y, n), n -> y
4404       if (Agg == EV->getAggregateOperand())
4405         return Agg;
4406     }
4407 
4408   return nullptr;
4409 }
4410 
4411 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
4412                                      ArrayRef<unsigned> Idxs,
4413                                      const SimplifyQuery &Q) {
4414   return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
4415 }
4416 
4417 Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
4418                                        const SimplifyQuery &Q) {
4419   // Try to constant fold.
4420   auto *VecC = dyn_cast<Constant>(Vec);
4421   auto *ValC = dyn_cast<Constant>(Val);
4422   auto *IdxC = dyn_cast<Constant>(Idx);
4423   if (VecC && ValC && IdxC)
4424     return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
4425 
4426   // For fixed-length vector, fold into poison if index is out of bounds.
4427   if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
4428     if (isa<FixedVectorType>(Vec->getType()) &&
4429         CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
4430       return PoisonValue::get(Vec->getType());
4431   }
4432 
4433   // If index is undef, it might be out of bounds (see above case)
4434   if (Q.isUndefValue(Idx))
4435     return PoisonValue::get(Vec->getType());
4436 
4437   // If the scalar is poison, or it is undef and there is no risk of
4438   // propagating poison from the vector value, simplify to the vector value.
4439   if (isa<PoisonValue>(Val) ||
4440       (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
4441     return Vec;
4442 
4443   // If we are extracting a value from a vector, then inserting it into the same
4444   // place, that's the input vector:
4445   // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
4446   if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
4447     return Vec;
4448 
4449   return nullptr;
4450 }
4451 
4452 /// Given operands for an ExtractValueInst, see if we can fold the result.
4453 /// If not, this returns null.
4454 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4455                                        const SimplifyQuery &, unsigned) {
4456   if (auto *CAgg = dyn_cast<Constant>(Agg))
4457     return ConstantFoldExtractValueInstruction(CAgg, Idxs);
4458 
4459   // extractvalue x, (insertvalue y, elt, n), n -> elt
4460   unsigned NumIdxs = Idxs.size();
4461   for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
4462        IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
4463     ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
4464     unsigned NumInsertValueIdxs = InsertValueIdxs.size();
4465     unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
4466     if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
4467         Idxs.slice(0, NumCommonIdxs)) {
4468       if (NumIdxs == NumInsertValueIdxs)
4469         return IVI->getInsertedValueOperand();
4470       break;
4471     }
4472   }
4473 
4474   return nullptr;
4475 }
4476 
4477 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4478                                       const SimplifyQuery &Q) {
4479   return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
4480 }
4481 
4482 /// Given operands for an ExtractElementInst, see if we can fold the result.
4483 /// If not, this returns null.
4484 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
4485                                          const SimplifyQuery &Q, unsigned) {
4486   auto *VecVTy = cast<VectorType>(Vec->getType());
4487   if (auto *CVec = dyn_cast<Constant>(Vec)) {
4488     if (auto *CIdx = dyn_cast<Constant>(Idx))
4489       return ConstantExpr::getExtractElement(CVec, CIdx);
4490 
4491     // The index is not relevant if our vector is a splat.
4492     if (auto *Splat = CVec->getSplatValue())
4493       return Splat;
4494 
4495     if (Q.isUndefValue(Vec))
4496       return UndefValue::get(VecVTy->getElementType());
4497   }
4498 
4499   // If extracting a specified index from the vector, see if we can recursively
4500   // find a previously computed scalar that was inserted into the vector.
4501   if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
4502     // For fixed-length vector, fold into undef if index is out of bounds.
4503     unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
4504     if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
4505       return PoisonValue::get(VecVTy->getElementType());
4506     // Handle case where an element is extracted from a splat.
4507     if (IdxC->getValue().ult(MinNumElts))
4508       if (auto *Splat = getSplatValue(Vec))
4509         return Splat;
4510     if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
4511       return Elt;
4512   }
4513 
4514   // An undef extract index can be arbitrarily chosen to be an out-of-range
4515   // index value, which would result in the instruction being poison.
4516   if (Q.isUndefValue(Idx))
4517     return PoisonValue::get(VecVTy->getElementType());
4518 
4519   return nullptr;
4520 }
4521 
4522 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
4523                                         const SimplifyQuery &Q) {
4524   return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
4525 }
4526 
4527 /// See if we can fold the given phi. If not, returns null.
4528 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) {
4529   // WARNING: no matter how worthwhile it may seem, we can not perform PHI CSE
4530   //          here, because the PHI we may succeed simplifying to was not
4531   //          def-reachable from the original PHI!
4532 
4533   // If all of the PHI's incoming values are the same then replace the PHI node
4534   // with the common value.
4535   Value *CommonValue = nullptr;
4536   bool HasUndefInput = false;
4537   for (Value *Incoming : PN->incoming_values()) {
4538     // If the incoming value is the phi node itself, it can safely be skipped.
4539     if (Incoming == PN) continue;
4540     if (Q.isUndefValue(Incoming)) {
4541       // Remember that we saw an undef value, but otherwise ignore them.
4542       HasUndefInput = true;
4543       continue;
4544     }
4545     if (CommonValue && Incoming != CommonValue)
4546       return nullptr;  // Not the same, bail out.
4547     CommonValue = Incoming;
4548   }
4549 
4550   // If CommonValue is null then all of the incoming values were either undef or
4551   // equal to the phi node itself.
4552   if (!CommonValue)
4553     return UndefValue::get(PN->getType());
4554 
4555   // If we have a PHI node like phi(X, undef, X), where X is defined by some
4556   // instruction, we cannot return X as the result of the PHI node unless it
4557   // dominates the PHI block.
4558   if (HasUndefInput)
4559     return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
4560 
4561   return CommonValue;
4562 }
4563 
4564 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
4565                                Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) {
4566   if (auto *C = dyn_cast<Constant>(Op))
4567     return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
4568 
4569   if (auto *CI = dyn_cast<CastInst>(Op)) {
4570     auto *Src = CI->getOperand(0);
4571     Type *SrcTy = Src->getType();
4572     Type *MidTy = CI->getType();
4573     Type *DstTy = Ty;
4574     if (Src->getType() == Ty) {
4575       auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
4576       auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
4577       Type *SrcIntPtrTy =
4578           SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
4579       Type *MidIntPtrTy =
4580           MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
4581       Type *DstIntPtrTy =
4582           DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
4583       if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
4584                                          SrcIntPtrTy, MidIntPtrTy,
4585                                          DstIntPtrTy) == Instruction::BitCast)
4586         return Src;
4587     }
4588   }
4589 
4590   // bitcast x -> x
4591   if (CastOpc == Instruction::BitCast)
4592     if (Op->getType() == Ty)
4593       return Op;
4594 
4595   return nullptr;
4596 }
4597 
4598 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
4599                               const SimplifyQuery &Q) {
4600   return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
4601 }
4602 
4603 /// For the given destination element of a shuffle, peek through shuffles to
4604 /// match a root vector source operand that contains that element in the same
4605 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
4606 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
4607                                    int MaskVal, Value *RootVec,
4608                                    unsigned MaxRecurse) {
4609   if (!MaxRecurse--)
4610     return nullptr;
4611 
4612   // Bail out if any mask value is undefined. That kind of shuffle may be
4613   // simplified further based on demanded bits or other folds.
4614   if (MaskVal == -1)
4615     return nullptr;
4616 
4617   // The mask value chooses which source operand we need to look at next.
4618   int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
4619   int RootElt = MaskVal;
4620   Value *SourceOp = Op0;
4621   if (MaskVal >= InVecNumElts) {
4622     RootElt = MaskVal - InVecNumElts;
4623     SourceOp = Op1;
4624   }
4625 
4626   // If the source operand is a shuffle itself, look through it to find the
4627   // matching root vector.
4628   if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
4629     return foldIdentityShuffles(
4630         DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
4631         SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
4632   }
4633 
4634   // TODO: Look through bitcasts? What if the bitcast changes the vector element
4635   // size?
4636 
4637   // The source operand is not a shuffle. Initialize the root vector value for
4638   // this shuffle if that has not been done yet.
4639   if (!RootVec)
4640     RootVec = SourceOp;
4641 
4642   // Give up as soon as a source operand does not match the existing root value.
4643   if (RootVec != SourceOp)
4644     return nullptr;
4645 
4646   // The element must be coming from the same lane in the source vector
4647   // (although it may have crossed lanes in intermediate shuffles).
4648   if (RootElt != DestElt)
4649     return nullptr;
4650 
4651   return RootVec;
4652 }
4653 
4654 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
4655                                         ArrayRef<int> Mask, Type *RetTy,
4656                                         const SimplifyQuery &Q,
4657                                         unsigned MaxRecurse) {
4658   if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
4659     return UndefValue::get(RetTy);
4660 
4661   auto *InVecTy = cast<VectorType>(Op0->getType());
4662   unsigned MaskNumElts = Mask.size();
4663   ElementCount InVecEltCount = InVecTy->getElementCount();
4664 
4665   bool Scalable = InVecEltCount.isScalable();
4666 
4667   SmallVector<int, 32> Indices;
4668   Indices.assign(Mask.begin(), Mask.end());
4669 
4670   // Canonicalization: If mask does not select elements from an input vector,
4671   // replace that input vector with poison.
4672   if (!Scalable) {
4673     bool MaskSelects0 = false, MaskSelects1 = false;
4674     unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
4675     for (unsigned i = 0; i != MaskNumElts; ++i) {
4676       if (Indices[i] == -1)
4677         continue;
4678       if ((unsigned)Indices[i] < InVecNumElts)
4679         MaskSelects0 = true;
4680       else
4681         MaskSelects1 = true;
4682     }
4683     if (!MaskSelects0)
4684       Op0 = PoisonValue::get(InVecTy);
4685     if (!MaskSelects1)
4686       Op1 = PoisonValue::get(InVecTy);
4687   }
4688 
4689   auto *Op0Const = dyn_cast<Constant>(Op0);
4690   auto *Op1Const = dyn_cast<Constant>(Op1);
4691 
4692   // If all operands are constant, constant fold the shuffle. This
4693   // transformation depends on the value of the mask which is not known at
4694   // compile time for scalable vectors
4695   if (Op0Const && Op1Const)
4696     return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
4697 
4698   // Canonicalization: if only one input vector is constant, it shall be the
4699   // second one. This transformation depends on the value of the mask which
4700   // is not known at compile time for scalable vectors
4701   if (!Scalable && Op0Const && !Op1Const) {
4702     std::swap(Op0, Op1);
4703     ShuffleVectorInst::commuteShuffleMask(Indices,
4704                                           InVecEltCount.getKnownMinValue());
4705   }
4706 
4707   // A splat of an inserted scalar constant becomes a vector constant:
4708   // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
4709   // NOTE: We may have commuted above, so analyze the updated Indices, not the
4710   //       original mask constant.
4711   // NOTE: This transformation depends on the value of the mask which is not
4712   // known at compile time for scalable vectors
4713   Constant *C;
4714   ConstantInt *IndexC;
4715   if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
4716                                           m_ConstantInt(IndexC)))) {
4717     // Match a splat shuffle mask of the insert index allowing undef elements.
4718     int InsertIndex = IndexC->getZExtValue();
4719     if (all_of(Indices, [InsertIndex](int MaskElt) {
4720           return MaskElt == InsertIndex || MaskElt == -1;
4721         })) {
4722       assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
4723 
4724       // Shuffle mask undefs become undefined constant result elements.
4725       SmallVector<Constant *, 16> VecC(MaskNumElts, C);
4726       for (unsigned i = 0; i != MaskNumElts; ++i)
4727         if (Indices[i] == -1)
4728           VecC[i] = UndefValue::get(C->getType());
4729       return ConstantVector::get(VecC);
4730     }
4731   }
4732 
4733   // A shuffle of a splat is always the splat itself. Legal if the shuffle's
4734   // value type is same as the input vectors' type.
4735   if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
4736     if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
4737         is_splat(OpShuf->getShuffleMask()))
4738       return Op0;
4739 
4740   // All remaining transformation depend on the value of the mask, which is
4741   // not known at compile time for scalable vectors.
4742   if (Scalable)
4743     return nullptr;
4744 
4745   // Don't fold a shuffle with undef mask elements. This may get folded in a
4746   // better way using demanded bits or other analysis.
4747   // TODO: Should we allow this?
4748   if (is_contained(Indices, -1))
4749     return nullptr;
4750 
4751   // Check if every element of this shuffle can be mapped back to the
4752   // corresponding element of a single root vector. If so, we don't need this
4753   // shuffle. This handles simple identity shuffles as well as chains of
4754   // shuffles that may widen/narrow and/or move elements across lanes and back.
4755   Value *RootVec = nullptr;
4756   for (unsigned i = 0; i != MaskNumElts; ++i) {
4757     // Note that recursion is limited for each vector element, so if any element
4758     // exceeds the limit, this will fail to simplify.
4759     RootVec =
4760         foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
4761 
4762     // We can't replace a widening/narrowing shuffle with one of its operands.
4763     if (!RootVec || RootVec->getType() != RetTy)
4764       return nullptr;
4765   }
4766   return RootVec;
4767 }
4768 
4769 /// Given operands for a ShuffleVectorInst, fold the result or return null.
4770 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
4771                                        ArrayRef<int> Mask, Type *RetTy,
4772                                        const SimplifyQuery &Q) {
4773   return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
4774 }
4775 
4776 static Constant *foldConstant(Instruction::UnaryOps Opcode,
4777                               Value *&Op, const SimplifyQuery &Q) {
4778   if (auto *C = dyn_cast<Constant>(Op))
4779     return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
4780   return nullptr;
4781 }
4782 
4783 /// Given the operand for an FNeg, see if we can fold the result.  If not, this
4784 /// returns null.
4785 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
4786                                const SimplifyQuery &Q, unsigned MaxRecurse) {
4787   if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
4788     return C;
4789 
4790   Value *X;
4791   // fneg (fneg X) ==> X
4792   if (match(Op, m_FNeg(m_Value(X))))
4793     return X;
4794 
4795   return nullptr;
4796 }
4797 
4798 Value *llvm::SimplifyFNegInst(Value *Op, FastMathFlags FMF,
4799                               const SimplifyQuery &Q) {
4800   return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
4801 }
4802 
4803 static Constant *propagateNaN(Constant *In) {
4804   // If the input is a vector with undef elements, just return a default NaN.
4805   if (!In->isNaN())
4806     return ConstantFP::getNaN(In->getType());
4807 
4808   // Propagate the existing NaN constant when possible.
4809   // TODO: Should we quiet a signaling NaN?
4810   return In;
4811 }
4812 
4813 /// Perform folds that are common to any floating-point operation. This implies
4814 /// transforms based on poison/undef/NaN because the operation itself makes no
4815 /// difference to the result.
4816 static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
4817                               const SimplifyQuery &Q) {
4818   for (Value *V : Ops) {
4819     // Poison is independent of anything else. It always propagates from an
4820     // operand to a math result.
4821     if (match(V, m_Poison()))
4822       return PoisonValue::get(V->getType());
4823 
4824     bool IsNan = match(V, m_NaN());
4825     bool IsInf = match(V, m_Inf());
4826     bool IsUndef = Q.isUndefValue(V);
4827 
4828     // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
4829     // (an undef operand can be chosen to be Nan/Inf), then the result of
4830     // this operation is poison.
4831     if (FMF.noNaNs() && (IsNan || IsUndef))
4832       return PoisonValue::get(V->getType());
4833     if (FMF.noInfs() && (IsInf || IsUndef))
4834       return PoisonValue::get(V->getType());
4835 
4836     if (IsUndef || IsNan)
4837       return propagateNaN(cast<Constant>(V));
4838   }
4839   return nullptr;
4840 }
4841 
4842 /// Given operands for an FAdd, see if we can fold the result.  If not, this
4843 /// returns null.
4844 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4845                                const SimplifyQuery &Q, unsigned MaxRecurse) {
4846   if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
4847     return C;
4848 
4849   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
4850     return C;
4851 
4852   // fadd X, -0 ==> X
4853   if (match(Op1, m_NegZeroFP()))
4854     return Op0;
4855 
4856   // fadd X, 0 ==> X, when we know X is not -0
4857   if (match(Op1, m_PosZeroFP()) &&
4858       (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4859     return Op0;
4860 
4861   // With nnan: -X + X --> 0.0 (and commuted variant)
4862   // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
4863   // Negative zeros are allowed because we always end up with positive zero:
4864   // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4865   // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4866   // X =  0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
4867   // X =  0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
4868   if (FMF.noNaNs()) {
4869     if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
4870         match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
4871       return ConstantFP::getNullValue(Op0->getType());
4872 
4873     if (match(Op0, m_FNeg(m_Specific(Op1))) ||
4874         match(Op1, m_FNeg(m_Specific(Op0))))
4875       return ConstantFP::getNullValue(Op0->getType());
4876   }
4877 
4878   // (X - Y) + Y --> X
4879   // Y + (X - Y) --> X
4880   Value *X;
4881   if (FMF.noSignedZeros() && FMF.allowReassoc() &&
4882       (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
4883        match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
4884     return X;
4885 
4886   return nullptr;
4887 }
4888 
4889 /// Given operands for an FSub, see if we can fold the result.  If not, this
4890 /// returns null.
4891 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4892                                const SimplifyQuery &Q, unsigned MaxRecurse) {
4893   if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
4894     return C;
4895 
4896   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
4897     return C;
4898 
4899   // fsub X, +0 ==> X
4900   if (match(Op1, m_PosZeroFP()))
4901     return Op0;
4902 
4903   // fsub X, -0 ==> X, when we know X is not -0
4904   if (match(Op1, m_NegZeroFP()) &&
4905       (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4906     return Op0;
4907 
4908   // fsub -0.0, (fsub -0.0, X) ==> X
4909   // fsub -0.0, (fneg X) ==> X
4910   Value *X;
4911   if (match(Op0, m_NegZeroFP()) &&
4912       match(Op1, m_FNeg(m_Value(X))))
4913     return X;
4914 
4915   // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
4916   // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
4917   if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
4918       (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
4919        match(Op1, m_FNeg(m_Value(X)))))
4920     return X;
4921 
4922   // fsub nnan x, x ==> 0.0
4923   if (FMF.noNaNs() && Op0 == Op1)
4924     return Constant::getNullValue(Op0->getType());
4925 
4926   // Y - (Y - X) --> X
4927   // (X + Y) - Y --> X
4928   if (FMF.noSignedZeros() && FMF.allowReassoc() &&
4929       (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
4930        match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
4931     return X;
4932 
4933   return nullptr;
4934 }
4935 
4936 static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
4937                               const SimplifyQuery &Q, unsigned MaxRecurse) {
4938   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
4939     return C;
4940 
4941   // fmul X, 1.0 ==> X
4942   if (match(Op1, m_FPOne()))
4943     return Op0;
4944 
4945   // fmul 1.0, X ==> X
4946   if (match(Op0, m_FPOne()))
4947     return Op1;
4948 
4949   // fmul nnan nsz X, 0 ==> 0
4950   if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP()))
4951     return ConstantFP::getNullValue(Op0->getType());
4952 
4953   // fmul nnan nsz 0, X ==> 0
4954   if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
4955     return ConstantFP::getNullValue(Op1->getType());
4956 
4957   // sqrt(X) * sqrt(X) --> X, if we can:
4958   // 1. Remove the intermediate rounding (reassociate).
4959   // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
4960   // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
4961   Value *X;
4962   if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) &&
4963       FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros())
4964     return X;
4965 
4966   return nullptr;
4967 }
4968 
4969 /// Given the operands for an FMul, see if we can fold the result
4970 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4971                                const SimplifyQuery &Q, unsigned MaxRecurse) {
4972   if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
4973     return C;
4974 
4975   // Now apply simplifications that do not require rounding.
4976   return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse);
4977 }
4978 
4979 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4980                               const SimplifyQuery &Q) {
4981   return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit);
4982 }
4983 
4984 
4985 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4986                               const SimplifyQuery &Q) {
4987   return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit);
4988 }
4989 
4990 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4991                               const SimplifyQuery &Q) {
4992   return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit);
4993 }
4994 
4995 Value *llvm::SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
4996                              const SimplifyQuery &Q) {
4997   return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit);
4998 }
4999 
5000 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5001                                const SimplifyQuery &Q, unsigned) {
5002   if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
5003     return C;
5004 
5005   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
5006     return C;
5007 
5008   // X / 1.0 -> X
5009   if (match(Op1, m_FPOne()))
5010     return Op0;
5011 
5012   // 0 / X -> 0
5013   // Requires that NaNs are off (X could be zero) and signed zeroes are
5014   // ignored (X could be positive or negative, so the output sign is unknown).
5015   if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
5016     return ConstantFP::getNullValue(Op0->getType());
5017 
5018   if (FMF.noNaNs()) {
5019     // X / X -> 1.0 is legal when NaNs are ignored.
5020     // We can ignore infinities because INF/INF is NaN.
5021     if (Op0 == Op1)
5022       return ConstantFP::get(Op0->getType(), 1.0);
5023 
5024     // (X * Y) / Y --> X if we can reassociate to the above form.
5025     Value *X;
5026     if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
5027       return X;
5028 
5029     // -X /  X -> -1.0 and
5030     //  X / -X -> -1.0 are legal when NaNs are ignored.
5031     // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
5032     if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
5033         match(Op1, m_FNegNSZ(m_Specific(Op0))))
5034       return ConstantFP::get(Op0->getType(), -1.0);
5035   }
5036 
5037   return nullptr;
5038 }
5039 
5040 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5041                               const SimplifyQuery &Q) {
5042   return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit);
5043 }
5044 
5045 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5046                                const SimplifyQuery &Q, unsigned) {
5047   if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
5048     return C;
5049 
5050   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
5051     return C;
5052 
5053   // Unlike fdiv, the result of frem always matches the sign of the dividend.
5054   // The constant match may include undef elements in a vector, so return a full
5055   // zero constant as the result.
5056   if (FMF.noNaNs()) {
5057     // +0 % X -> 0
5058     if (match(Op0, m_PosZeroFP()))
5059       return ConstantFP::getNullValue(Op0->getType());
5060     // -0 % X -> -0
5061     if (match(Op0, m_NegZeroFP()))
5062       return ConstantFP::getNegativeZero(Op0->getType());
5063   }
5064 
5065   return nullptr;
5066 }
5067 
5068 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5069                               const SimplifyQuery &Q) {
5070   return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit);
5071 }
5072 
5073 //=== Helper functions for higher up the class hierarchy.
5074 
5075 /// Given the operand for a UnaryOperator, see if we can fold the result.
5076 /// If not, this returns null.
5077 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
5078                            unsigned MaxRecurse) {
5079   switch (Opcode) {
5080   case Instruction::FNeg:
5081     return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
5082   default:
5083     llvm_unreachable("Unexpected opcode");
5084   }
5085 }
5086 
5087 /// Given the operand for a UnaryOperator, see if we can fold the result.
5088 /// If not, this returns null.
5089 /// Try to use FastMathFlags when folding the result.
5090 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
5091                              const FastMathFlags &FMF,
5092                              const SimplifyQuery &Q, unsigned MaxRecurse) {
5093   switch (Opcode) {
5094   case Instruction::FNeg:
5095     return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
5096   default:
5097     return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
5098   }
5099 }
5100 
5101 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
5102   return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
5103 }
5104 
5105 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
5106                           const SimplifyQuery &Q) {
5107   return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
5108 }
5109 
5110 /// Given operands for a BinaryOperator, see if we can fold the result.
5111 /// If not, this returns null.
5112 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5113                             const SimplifyQuery &Q, unsigned MaxRecurse) {
5114   switch (Opcode) {
5115   case Instruction::Add:
5116     return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
5117   case Instruction::Sub:
5118     return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
5119   case Instruction::Mul:
5120     return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
5121   case Instruction::SDiv:
5122     return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
5123   case Instruction::UDiv:
5124     return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
5125   case Instruction::SRem:
5126     return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
5127   case Instruction::URem:
5128     return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
5129   case Instruction::Shl:
5130     return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
5131   case Instruction::LShr:
5132     return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
5133   case Instruction::AShr:
5134     return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
5135   case Instruction::And:
5136     return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
5137   case Instruction::Or:
5138     return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
5139   case Instruction::Xor:
5140     return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
5141   case Instruction::FAdd:
5142     return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5143   case Instruction::FSub:
5144     return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5145   case Instruction::FMul:
5146     return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5147   case Instruction::FDiv:
5148     return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5149   case Instruction::FRem:
5150     return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5151   default:
5152     llvm_unreachable("Unexpected opcode");
5153   }
5154 }
5155 
5156 /// Given operands for a BinaryOperator, see if we can fold the result.
5157 /// If not, this returns null.
5158 /// Try to use FastMathFlags when folding the result.
5159 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5160                             const FastMathFlags &FMF, const SimplifyQuery &Q,
5161                             unsigned MaxRecurse) {
5162   switch (Opcode) {
5163   case Instruction::FAdd:
5164     return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
5165   case Instruction::FSub:
5166     return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
5167   case Instruction::FMul:
5168     return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
5169   case Instruction::FDiv:
5170     return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
5171   default:
5172     return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
5173   }
5174 }
5175 
5176 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5177                            const SimplifyQuery &Q) {
5178   return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
5179 }
5180 
5181 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5182                            FastMathFlags FMF, const SimplifyQuery &Q) {
5183   return ::SimplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
5184 }
5185 
5186 /// Given operands for a CmpInst, see if we can fold the result.
5187 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5188                               const SimplifyQuery &Q, unsigned MaxRecurse) {
5189   if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
5190     return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
5191   return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5192 }
5193 
5194 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5195                              const SimplifyQuery &Q) {
5196   return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
5197 }
5198 
5199 static bool IsIdempotent(Intrinsic::ID ID) {
5200   switch (ID) {
5201   default: return false;
5202 
5203   // Unary idempotent: f(f(x)) = f(x)
5204   case Intrinsic::fabs:
5205   case Intrinsic::floor:
5206   case Intrinsic::ceil:
5207   case Intrinsic::trunc:
5208   case Intrinsic::rint:
5209   case Intrinsic::nearbyint:
5210   case Intrinsic::round:
5211   case Intrinsic::roundeven:
5212   case Intrinsic::canonicalize:
5213     return true;
5214   }
5215 }
5216 
5217 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
5218                                    const DataLayout &DL) {
5219   GlobalValue *PtrSym;
5220   APInt PtrOffset;
5221   if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
5222     return nullptr;
5223 
5224   Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
5225   Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
5226   Type *Int32PtrTy = Int32Ty->getPointerTo();
5227   Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
5228 
5229   auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
5230   if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
5231     return nullptr;
5232 
5233   uint64_t OffsetInt = OffsetConstInt->getSExtValue();
5234   if (OffsetInt % 4 != 0)
5235     return nullptr;
5236 
5237   Constant *C = ConstantExpr::getGetElementPtr(
5238       Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
5239       ConstantInt::get(Int64Ty, OffsetInt / 4));
5240   Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
5241   if (!Loaded)
5242     return nullptr;
5243 
5244   auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
5245   if (!LoadedCE)
5246     return nullptr;
5247 
5248   if (LoadedCE->getOpcode() == Instruction::Trunc) {
5249     LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
5250     if (!LoadedCE)
5251       return nullptr;
5252   }
5253 
5254   if (LoadedCE->getOpcode() != Instruction::Sub)
5255     return nullptr;
5256 
5257   auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
5258   if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
5259     return nullptr;
5260   auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
5261 
5262   Constant *LoadedRHS = LoadedCE->getOperand(1);
5263   GlobalValue *LoadedRHSSym;
5264   APInt LoadedRHSOffset;
5265   if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
5266                                   DL) ||
5267       PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
5268     return nullptr;
5269 
5270   return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
5271 }
5272 
5273 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
5274                                      const SimplifyQuery &Q) {
5275   // Idempotent functions return the same result when called repeatedly.
5276   Intrinsic::ID IID = F->getIntrinsicID();
5277   if (IsIdempotent(IID))
5278     if (auto *II = dyn_cast<IntrinsicInst>(Op0))
5279       if (II->getIntrinsicID() == IID)
5280         return II;
5281 
5282   Value *X;
5283   switch (IID) {
5284   case Intrinsic::fabs:
5285     if (SignBitMustBeZero(Op0, Q.TLI)) return Op0;
5286     break;
5287   case Intrinsic::bswap:
5288     // bswap(bswap(x)) -> x
5289     if (match(Op0, m_BSwap(m_Value(X)))) return X;
5290     break;
5291   case Intrinsic::bitreverse:
5292     // bitreverse(bitreverse(x)) -> x
5293     if (match(Op0, m_BitReverse(m_Value(X)))) return X;
5294     break;
5295   case Intrinsic::ctpop: {
5296     // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
5297     // ctpop(and X, 1) --> and X, 1
5298     unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
5299     if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
5300                           Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
5301       return Op0;
5302     break;
5303   }
5304   case Intrinsic::exp:
5305     // exp(log(x)) -> x
5306     if (Q.CxtI->hasAllowReassoc() &&
5307         match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X)))) return X;
5308     break;
5309   case Intrinsic::exp2:
5310     // exp2(log2(x)) -> x
5311     if (Q.CxtI->hasAllowReassoc() &&
5312         match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X)))) return X;
5313     break;
5314   case Intrinsic::log:
5315     // log(exp(x)) -> x
5316     if (Q.CxtI->hasAllowReassoc() &&
5317         match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X)))) return X;
5318     break;
5319   case Intrinsic::log2:
5320     // log2(exp2(x)) -> x
5321     if (Q.CxtI->hasAllowReassoc() &&
5322         (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
5323          match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0),
5324                                                 m_Value(X))))) return X;
5325     break;
5326   case Intrinsic::log10:
5327     // log10(pow(10.0, x)) -> x
5328     if (Q.CxtI->hasAllowReassoc() &&
5329         match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0),
5330                                                m_Value(X)))) return X;
5331     break;
5332   case Intrinsic::floor:
5333   case Intrinsic::trunc:
5334   case Intrinsic::ceil:
5335   case Intrinsic::round:
5336   case Intrinsic::roundeven:
5337   case Intrinsic::nearbyint:
5338   case Intrinsic::rint: {
5339     // floor (sitofp x) -> sitofp x
5340     // floor (uitofp x) -> uitofp x
5341     //
5342     // Converting from int always results in a finite integral number or
5343     // infinity. For either of those inputs, these rounding functions always
5344     // return the same value, so the rounding can be eliminated.
5345     if (match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
5346       return Op0;
5347     break;
5348   }
5349   case Intrinsic::experimental_vector_reverse:
5350     // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
5351     if (match(Op0,
5352               m_Intrinsic<Intrinsic::experimental_vector_reverse>(m_Value(X))))
5353       return X;
5354     break;
5355   default:
5356     break;
5357   }
5358 
5359   return nullptr;
5360 }
5361 
5362 static APInt getMaxMinLimit(Intrinsic::ID IID, unsigned BitWidth) {
5363   switch (IID) {
5364   case Intrinsic::smax: return APInt::getSignedMaxValue(BitWidth);
5365   case Intrinsic::smin: return APInt::getSignedMinValue(BitWidth);
5366   case Intrinsic::umax: return APInt::getMaxValue(BitWidth);
5367   case Intrinsic::umin: return APInt::getMinValue(BitWidth);
5368   default: llvm_unreachable("Unexpected intrinsic");
5369   }
5370 }
5371 
5372 static ICmpInst::Predicate getMaxMinPredicate(Intrinsic::ID IID) {
5373   switch (IID) {
5374   case Intrinsic::smax: return ICmpInst::ICMP_SGE;
5375   case Intrinsic::smin: return ICmpInst::ICMP_SLE;
5376   case Intrinsic::umax: return ICmpInst::ICMP_UGE;
5377   case Intrinsic::umin: return ICmpInst::ICMP_ULE;
5378   default: llvm_unreachable("Unexpected intrinsic");
5379   }
5380 }
5381 
5382 /// Given a min/max intrinsic, see if it can be removed based on having an
5383 /// operand that is another min/max intrinsic with shared operand(s). The caller
5384 /// is expected to swap the operand arguments to handle commutation.
5385 static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
5386   Value *X, *Y;
5387   if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
5388     return nullptr;
5389 
5390   auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
5391   if (!MM0)
5392     return nullptr;
5393   Intrinsic::ID IID0 = MM0->getIntrinsicID();
5394 
5395   if (Op1 == X || Op1 == Y ||
5396       match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
5397     // max (max X, Y), X --> max X, Y
5398     if (IID0 == IID)
5399       return MM0;
5400     // max (min X, Y), X --> X
5401     if (IID0 == getInverseMinMaxIntrinsic(IID))
5402       return Op1;
5403   }
5404   return nullptr;
5405 }
5406 
5407 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
5408                                       const SimplifyQuery &Q) {
5409   Intrinsic::ID IID = F->getIntrinsicID();
5410   Type *ReturnType = F->getReturnType();
5411   unsigned BitWidth = ReturnType->getScalarSizeInBits();
5412   switch (IID) {
5413   case Intrinsic::abs:
5414     // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
5415     // It is always ok to pick the earlier abs. We'll just lose nsw if its only
5416     // on the outer abs.
5417     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
5418       return Op0;
5419     break;
5420 
5421   case Intrinsic::cttz: {
5422     Value *X;
5423     if (match(Op0, m_Shl(m_One(), m_Value(X))))
5424       return X;
5425     break;
5426   }
5427   case Intrinsic::ctlz: {
5428     Value *X;
5429     if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
5430       return X;
5431     if (match(Op0, m_AShr(m_Negative(), m_Value())))
5432       return Constant::getNullValue(ReturnType);
5433     break;
5434   }
5435   case Intrinsic::smax:
5436   case Intrinsic::smin:
5437   case Intrinsic::umax:
5438   case Intrinsic::umin: {
5439     // If the arguments are the same, this is a no-op.
5440     if (Op0 == Op1)
5441       return Op0;
5442 
5443     // Canonicalize constant operand as Op1.
5444     if (isa<Constant>(Op0))
5445       std::swap(Op0, Op1);
5446 
5447     // Assume undef is the limit value.
5448     if (Q.isUndefValue(Op1))
5449       return ConstantInt::get(ReturnType, getMaxMinLimit(IID, BitWidth));
5450 
5451     const APInt *C;
5452     if (match(Op1, m_APIntAllowUndef(C))) {
5453       // Clamp to limit value. For example:
5454       // umax(i8 %x, i8 255) --> 255
5455       if (*C == getMaxMinLimit(IID, BitWidth))
5456         return ConstantInt::get(ReturnType, *C);
5457 
5458       // If the constant op is the opposite of the limit value, the other must
5459       // be larger/smaller or equal. For example:
5460       // umin(i8 %x, i8 255) --> %x
5461       if (*C == getMaxMinLimit(getInverseMinMaxIntrinsic(IID), BitWidth))
5462         return Op0;
5463 
5464       // Remove nested call if constant operands allow it. Example:
5465       // max (max X, 7), 5 -> max X, 7
5466       auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
5467       if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
5468         // TODO: loosen undef/splat restrictions for vector constants.
5469         Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
5470         const APInt *InnerC;
5471         if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
5472             ((IID == Intrinsic::smax && InnerC->sge(*C)) ||
5473              (IID == Intrinsic::smin && InnerC->sle(*C)) ||
5474              (IID == Intrinsic::umax && InnerC->uge(*C)) ||
5475              (IID == Intrinsic::umin && InnerC->ule(*C))))
5476           return Op0;
5477       }
5478     }
5479 
5480     if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
5481       return V;
5482     if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
5483       return V;
5484 
5485     ICmpInst::Predicate Pred = getMaxMinPredicate(IID);
5486     if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
5487       return Op0;
5488     if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
5489       return Op1;
5490 
5491     if (Optional<bool> Imp =
5492             isImpliedByDomCondition(Pred, Op0, Op1, Q.CxtI, Q.DL))
5493       return *Imp ? Op0 : Op1;
5494     if (Optional<bool> Imp =
5495             isImpliedByDomCondition(Pred, Op1, Op0, Q.CxtI, Q.DL))
5496       return *Imp ? Op1 : Op0;
5497 
5498     break;
5499   }
5500   case Intrinsic::usub_with_overflow:
5501   case Intrinsic::ssub_with_overflow:
5502     // X - X -> { 0, false }
5503     // X - undef -> { 0, false }
5504     // undef - X -> { 0, false }
5505     if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5506       return Constant::getNullValue(ReturnType);
5507     break;
5508   case Intrinsic::uadd_with_overflow:
5509   case Intrinsic::sadd_with_overflow:
5510     // X + undef -> { -1, false }
5511     // undef + x -> { -1, false }
5512     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
5513       return ConstantStruct::get(
5514           cast<StructType>(ReturnType),
5515           {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
5516            Constant::getNullValue(ReturnType->getStructElementType(1))});
5517     }
5518     break;
5519   case Intrinsic::umul_with_overflow:
5520   case Intrinsic::smul_with_overflow:
5521     // 0 * X -> { 0, false }
5522     // X * 0 -> { 0, false }
5523     if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
5524       return Constant::getNullValue(ReturnType);
5525     // undef * X -> { 0, false }
5526     // X * undef -> { 0, false }
5527     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5528       return Constant::getNullValue(ReturnType);
5529     break;
5530   case Intrinsic::uadd_sat:
5531     // sat(MAX + X) -> MAX
5532     // sat(X + MAX) -> MAX
5533     if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
5534       return Constant::getAllOnesValue(ReturnType);
5535     LLVM_FALLTHROUGH;
5536   case Intrinsic::sadd_sat:
5537     // sat(X + undef) -> -1
5538     // sat(undef + X) -> -1
5539     // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
5540     // For signed: Assume undef is ~X, in which case X + ~X = -1.
5541     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5542       return Constant::getAllOnesValue(ReturnType);
5543 
5544     // X + 0 -> X
5545     if (match(Op1, m_Zero()))
5546       return Op0;
5547     // 0 + X -> X
5548     if (match(Op0, m_Zero()))
5549       return Op1;
5550     break;
5551   case Intrinsic::usub_sat:
5552     // sat(0 - X) -> 0, sat(X - MAX) -> 0
5553     if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
5554       return Constant::getNullValue(ReturnType);
5555     LLVM_FALLTHROUGH;
5556   case Intrinsic::ssub_sat:
5557     // X - X -> 0, X - undef -> 0, undef - X -> 0
5558     if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5559       return Constant::getNullValue(ReturnType);
5560     // X - 0 -> X
5561     if (match(Op1, m_Zero()))
5562       return Op0;
5563     break;
5564   case Intrinsic::load_relative:
5565     if (auto *C0 = dyn_cast<Constant>(Op0))
5566       if (auto *C1 = dyn_cast<Constant>(Op1))
5567         return SimplifyRelativeLoad(C0, C1, Q.DL);
5568     break;
5569   case Intrinsic::powi:
5570     if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
5571       // powi(x, 0) -> 1.0
5572       if (Power->isZero())
5573         return ConstantFP::get(Op0->getType(), 1.0);
5574       // powi(x, 1) -> x
5575       if (Power->isOne())
5576         return Op0;
5577     }
5578     break;
5579   case Intrinsic::copysign:
5580     // copysign X, X --> X
5581     if (Op0 == Op1)
5582       return Op0;
5583     // copysign -X, X --> X
5584     // copysign X, -X --> -X
5585     if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5586         match(Op1, m_FNeg(m_Specific(Op0))))
5587       return Op1;
5588     break;
5589   case Intrinsic::maxnum:
5590   case Intrinsic::minnum:
5591   case Intrinsic::maximum:
5592   case Intrinsic::minimum: {
5593     // If the arguments are the same, this is a no-op.
5594     if (Op0 == Op1) return Op0;
5595 
5596     // Canonicalize constant operand as Op1.
5597     if (isa<Constant>(Op0))
5598       std::swap(Op0, Op1);
5599 
5600     // If an argument is undef, return the other argument.
5601     if (Q.isUndefValue(Op1))
5602       return Op0;
5603 
5604     bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
5605     bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
5606 
5607     // minnum(X, nan) -> X
5608     // maxnum(X, nan) -> X
5609     // minimum(X, nan) -> nan
5610     // maximum(X, nan) -> nan
5611     if (match(Op1, m_NaN()))
5612       return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
5613 
5614     // In the following folds, inf can be replaced with the largest finite
5615     // float, if the ninf flag is set.
5616     const APFloat *C;
5617     if (match(Op1, m_APFloat(C)) &&
5618         (C->isInfinity() || (Q.CxtI->hasNoInfs() && C->isLargest()))) {
5619       // minnum(X, -inf) -> -inf
5620       // maxnum(X, +inf) -> +inf
5621       // minimum(X, -inf) -> -inf if nnan
5622       // maximum(X, +inf) -> +inf if nnan
5623       if (C->isNegative() == IsMin && (!PropagateNaN || Q.CxtI->hasNoNaNs()))
5624         return ConstantFP::get(ReturnType, *C);
5625 
5626       // minnum(X, +inf) -> X if nnan
5627       // maxnum(X, -inf) -> X if nnan
5628       // minimum(X, +inf) -> X
5629       // maximum(X, -inf) -> X
5630       if (C->isNegative() != IsMin && (PropagateNaN || Q.CxtI->hasNoNaNs()))
5631         return Op0;
5632     }
5633 
5634     // Min/max of the same operation with common operand:
5635     // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
5636     if (auto *M0 = dyn_cast<IntrinsicInst>(Op0))
5637       if (M0->getIntrinsicID() == IID &&
5638           (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1))
5639         return Op0;
5640     if (auto *M1 = dyn_cast<IntrinsicInst>(Op1))
5641       if (M1->getIntrinsicID() == IID &&
5642           (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0))
5643         return Op1;
5644 
5645     break;
5646   }
5647   case Intrinsic::experimental_vector_extract: {
5648     Type *ReturnType = F->getReturnType();
5649 
5650     // (extract_vector (insert_vector _, X, 0), 0) -> X
5651     unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
5652     Value *X = nullptr;
5653     if (match(Op0, m_Intrinsic<Intrinsic::experimental_vector_insert>(
5654                        m_Value(), m_Value(X), m_Zero())) &&
5655         IdxN == 0 && X->getType() == ReturnType)
5656       return X;
5657 
5658     break;
5659   }
5660   default:
5661     break;
5662   }
5663 
5664   return nullptr;
5665 }
5666 
5667 static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
5668 
5669   // Intrinsics with no operands have some kind of side effect. Don't simplify.
5670   unsigned NumOperands = Call->getNumArgOperands();
5671   if (!NumOperands)
5672     return nullptr;
5673 
5674   Function *F = cast<Function>(Call->getCalledFunction());
5675   Intrinsic::ID IID = F->getIntrinsicID();
5676   if (NumOperands == 1)
5677     return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q);
5678 
5679   if (NumOperands == 2)
5680     return simplifyBinaryIntrinsic(F, Call->getArgOperand(0),
5681                                    Call->getArgOperand(1), Q);
5682 
5683   // Handle intrinsics with 3 or more arguments.
5684   switch (IID) {
5685   case Intrinsic::masked_load:
5686   case Intrinsic::masked_gather: {
5687     Value *MaskArg = Call->getArgOperand(2);
5688     Value *PassthruArg = Call->getArgOperand(3);
5689     // If the mask is all zeros or undef, the "passthru" argument is the result.
5690     if (maskIsAllZeroOrUndef(MaskArg))
5691       return PassthruArg;
5692     return nullptr;
5693   }
5694   case Intrinsic::fshl:
5695   case Intrinsic::fshr: {
5696     Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1),
5697           *ShAmtArg = Call->getArgOperand(2);
5698 
5699     // If both operands are undef, the result is undef.
5700     if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
5701       return UndefValue::get(F->getReturnType());
5702 
5703     // If shift amount is undef, assume it is zero.
5704     if (Q.isUndefValue(ShAmtArg))
5705       return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
5706 
5707     const APInt *ShAmtC;
5708     if (match(ShAmtArg, m_APInt(ShAmtC))) {
5709       // If there's effectively no shift, return the 1st arg or 2nd arg.
5710       APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
5711       if (ShAmtC->urem(BitWidth).isNullValue())
5712         return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
5713     }
5714     return nullptr;
5715   }
5716   case Intrinsic::fma:
5717   case Intrinsic::fmuladd: {
5718     Value *Op0 = Call->getArgOperand(0);
5719     Value *Op1 = Call->getArgOperand(1);
5720     Value *Op2 = Call->getArgOperand(2);
5721     if (Value *V = simplifyFPOp({ Op0, Op1, Op2 }, {}, Q))
5722       return V;
5723     return nullptr;
5724   }
5725   case Intrinsic::smul_fix:
5726   case Intrinsic::smul_fix_sat: {
5727     Value *Op0 = Call->getArgOperand(0);
5728     Value *Op1 = Call->getArgOperand(1);
5729     Value *Op2 = Call->getArgOperand(2);
5730     Type *ReturnType = F->getReturnType();
5731 
5732     // Canonicalize constant operand as Op1 (ConstantFolding handles the case
5733     // when both Op0 and Op1 are constant so we do not care about that special
5734     // case here).
5735     if (isa<Constant>(Op0))
5736       std::swap(Op0, Op1);
5737 
5738     // X * 0 -> 0
5739     if (match(Op1, m_Zero()))
5740       return Constant::getNullValue(ReturnType);
5741 
5742     // X * undef -> 0
5743     if (Q.isUndefValue(Op1))
5744       return Constant::getNullValue(ReturnType);
5745 
5746     // X * (1 << Scale) -> X
5747     APInt ScaledOne =
5748         APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
5749                             cast<ConstantInt>(Op2)->getZExtValue());
5750     if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
5751       return Op0;
5752 
5753     return nullptr;
5754   }
5755   case Intrinsic::experimental_vector_insert: {
5756     Value *Vec = Call->getArgOperand(0);
5757     Value *SubVec = Call->getArgOperand(1);
5758     Value *Idx = Call->getArgOperand(2);
5759     Type *ReturnType = F->getReturnType();
5760 
5761     // (insert_vector Y, (extract_vector X, 0), 0) -> X
5762     // where: Y is X, or Y is undef
5763     unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
5764     Value *X = nullptr;
5765     if (match(SubVec, m_Intrinsic<Intrinsic::experimental_vector_extract>(
5766                           m_Value(X), m_Zero())) &&
5767         (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
5768         X->getType() == ReturnType)
5769       return X;
5770 
5771     return nullptr;
5772   }
5773   default:
5774     return nullptr;
5775   }
5776 }
5777 
5778 static Value *tryConstantFoldCall(CallBase *Call, const SimplifyQuery &Q) {
5779   auto *F = dyn_cast<Function>(Call->getCalledOperand());
5780   if (!F || !canConstantFoldCallTo(Call, F))
5781     return nullptr;
5782 
5783   SmallVector<Constant *, 4> ConstantArgs;
5784   unsigned NumArgs = Call->getNumArgOperands();
5785   ConstantArgs.reserve(NumArgs);
5786   for (auto &Arg : Call->args()) {
5787     Constant *C = dyn_cast<Constant>(&Arg);
5788     if (!C) {
5789       if (isa<MetadataAsValue>(Arg.get()))
5790         continue;
5791       return nullptr;
5792     }
5793     ConstantArgs.push_back(C);
5794   }
5795 
5796   return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
5797 }
5798 
5799 Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) {
5800   // musttail calls can only be simplified if they are also DCEd.
5801   // As we can't guarantee this here, don't simplify them.
5802   if (Call->isMustTailCall())
5803     return nullptr;
5804 
5805   // call undef -> poison
5806   // call null -> poison
5807   Value *Callee = Call->getCalledOperand();
5808   if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
5809     return PoisonValue::get(Call->getType());
5810 
5811   if (Value *V = tryConstantFoldCall(Call, Q))
5812     return V;
5813 
5814   auto *F = dyn_cast<Function>(Callee);
5815   if (F && F->isIntrinsic())
5816     if (Value *Ret = simplifyIntrinsic(Call, Q))
5817       return Ret;
5818 
5819   return nullptr;
5820 }
5821 
5822 /// Given operands for a Freeze, see if we can fold the result.
5823 static Value *SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
5824   // Use a utility function defined in ValueTracking.
5825   if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
5826     return Op0;
5827   // We have room for improvement.
5828   return nullptr;
5829 }
5830 
5831 Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
5832   return ::SimplifyFreezeInst(Op0, Q);
5833 }
5834 
5835 static Constant *ConstructLoadOperandConstant(Value *Op) {
5836   SmallVector<Value *, 4> Worklist;
5837   // Invalid IR in unreachable code may contain self-referential values. Don't infinitely loop.
5838   SmallPtrSet<Value *, 4> Visited;
5839   Worklist.push_back(Op);
5840   while (true) {
5841     Value *CurOp = Worklist.back();
5842     if (!Visited.insert(CurOp).second)
5843       return nullptr;
5844     if (isa<Constant>(CurOp))
5845       break;
5846     if (auto *BC = dyn_cast<BitCastOperator>(CurOp)) {
5847       Worklist.push_back(BC->getOperand(0));
5848     } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
5849       for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
5850         if (!isa<Constant>(GEP->getOperand(I)))
5851           return nullptr;
5852       }
5853       Worklist.push_back(GEP->getOperand(0));
5854     } else if (auto *II = dyn_cast<IntrinsicInst>(CurOp)) {
5855       if (II->isLaunderOrStripInvariantGroup())
5856         Worklist.push_back(II->getOperand(0));
5857       else
5858         return nullptr;
5859     } else {
5860       return nullptr;
5861     }
5862   }
5863 
5864   Constant *NewOp = cast<Constant>(Worklist.pop_back_val());
5865   while (!Worklist.empty()) {
5866     Value *CurOp = Worklist.pop_back_val();
5867     if (isa<BitCastOperator>(CurOp)) {
5868       NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
5869     } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
5870       SmallVector<Constant *> Idxs;
5871       Idxs.reserve(GEP->getNumOperands() - 1);
5872       for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
5873         Idxs.push_back(cast<Constant>(GEP->getOperand(I)));
5874       }
5875       NewOp = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), NewOp,
5876                                              Idxs, GEP->isInBounds(),
5877                                              GEP->getInRangeIndex());
5878     } else {
5879       assert(isa<IntrinsicInst>(CurOp) &&
5880              cast<IntrinsicInst>(CurOp)->isLaunderOrStripInvariantGroup() &&
5881              "expected invariant group intrinsic");
5882       NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
5883     }
5884   }
5885   return NewOp;
5886 }
5887 
5888 static Value *SimplifyLoadInst(LoadInst *LI, const SimplifyQuery &Q) {
5889   if (LI->isVolatile())
5890     return nullptr;
5891 
5892   if (auto *C = ConstantFoldInstruction(LI, Q.DL))
5893     return C;
5894 
5895   // The following only catches more cases than ConstantFoldInstruction() if the
5896   // load operand wasn't a constant. Specifically, invariant.group intrinsics.
5897   if (isa<Constant>(LI->getPointerOperand()))
5898     return nullptr;
5899 
5900   if (auto *C = dyn_cast_or_null<Constant>(
5901           ConstructLoadOperandConstant(LI->getPointerOperand())))
5902     return ConstantFoldLoadFromConstPtr(C, LI->getType(), Q.DL);
5903 
5904   return nullptr;
5905 }
5906 
5907 /// See if we can compute a simplified version of this instruction.
5908 /// If not, this returns null.
5909 
5910 Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
5911                                  OptimizationRemarkEmitter *ORE) {
5912   const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
5913   Value *Result;
5914 
5915   switch (I->getOpcode()) {
5916   default:
5917     Result = ConstantFoldInstruction(I, Q.DL, Q.TLI);
5918     break;
5919   case Instruction::FNeg:
5920     Result = SimplifyFNegInst(I->getOperand(0), I->getFastMathFlags(), Q);
5921     break;
5922   case Instruction::FAdd:
5923     Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
5924                               I->getFastMathFlags(), Q);
5925     break;
5926   case Instruction::Add:
5927     Result =
5928         SimplifyAddInst(I->getOperand(0), I->getOperand(1),
5929                         Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
5930                         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
5931     break;
5932   case Instruction::FSub:
5933     Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
5934                               I->getFastMathFlags(), Q);
5935     break;
5936   case Instruction::Sub:
5937     Result =
5938         SimplifySubInst(I->getOperand(0), I->getOperand(1),
5939                         Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
5940                         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
5941     break;
5942   case Instruction::FMul:
5943     Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
5944                               I->getFastMathFlags(), Q);
5945     break;
5946   case Instruction::Mul:
5947     Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q);
5948     break;
5949   case Instruction::SDiv:
5950     Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q);
5951     break;
5952   case Instruction::UDiv:
5953     Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q);
5954     break;
5955   case Instruction::FDiv:
5956     Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
5957                               I->getFastMathFlags(), Q);
5958     break;
5959   case Instruction::SRem:
5960     Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q);
5961     break;
5962   case Instruction::URem:
5963     Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q);
5964     break;
5965   case Instruction::FRem:
5966     Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
5967                               I->getFastMathFlags(), Q);
5968     break;
5969   case Instruction::Shl:
5970     Result =
5971         SimplifyShlInst(I->getOperand(0), I->getOperand(1),
5972                         Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
5973                         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
5974     break;
5975   case Instruction::LShr:
5976     Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
5977                               Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
5978     break;
5979   case Instruction::AShr:
5980     Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
5981                               Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
5982     break;
5983   case Instruction::And:
5984     Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q);
5985     break;
5986   case Instruction::Or:
5987     Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q);
5988     break;
5989   case Instruction::Xor:
5990     Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q);
5991     break;
5992   case Instruction::ICmp:
5993     Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
5994                               I->getOperand(0), I->getOperand(1), Q);
5995     break;
5996   case Instruction::FCmp:
5997     Result =
5998         SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0),
5999                          I->getOperand(1), I->getFastMathFlags(), Q);
6000     break;
6001   case Instruction::Select:
6002     Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
6003                                 I->getOperand(2), Q);
6004     break;
6005   case Instruction::GetElementPtr: {
6006     SmallVector<Value *, 8> Ops(I->operands());
6007     Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
6008                              Ops, Q);
6009     break;
6010   }
6011   case Instruction::InsertValue: {
6012     InsertValueInst *IV = cast<InsertValueInst>(I);
6013     Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
6014                                      IV->getInsertedValueOperand(),
6015                                      IV->getIndices(), Q);
6016     break;
6017   }
6018   case Instruction::InsertElement: {
6019     auto *IE = cast<InsertElementInst>(I);
6020     Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1),
6021                                        IE->getOperand(2), Q);
6022     break;
6023   }
6024   case Instruction::ExtractValue: {
6025     auto *EVI = cast<ExtractValueInst>(I);
6026     Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
6027                                       EVI->getIndices(), Q);
6028     break;
6029   }
6030   case Instruction::ExtractElement: {
6031     auto *EEI = cast<ExtractElementInst>(I);
6032     Result = SimplifyExtractElementInst(EEI->getVectorOperand(),
6033                                         EEI->getIndexOperand(), Q);
6034     break;
6035   }
6036   case Instruction::ShuffleVector: {
6037     auto *SVI = cast<ShuffleVectorInst>(I);
6038     Result =
6039         SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
6040                                   SVI->getShuffleMask(), SVI->getType(), Q);
6041     break;
6042   }
6043   case Instruction::PHI:
6044     Result = SimplifyPHINode(cast<PHINode>(I), Q);
6045     break;
6046   case Instruction::Call: {
6047     Result = SimplifyCall(cast<CallInst>(I), Q);
6048     break;
6049   }
6050   case Instruction::Freeze:
6051     Result = SimplifyFreezeInst(I->getOperand(0), Q);
6052     break;
6053 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
6054 #include "llvm/IR/Instruction.def"
6055 #undef HANDLE_CAST_INST
6056     Result =
6057         SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q);
6058     break;
6059   case Instruction::Alloca:
6060     // No simplifications for Alloca and it can't be constant folded.
6061     Result = nullptr;
6062     break;
6063   case Instruction::Load:
6064     Result = SimplifyLoadInst(cast<LoadInst>(I), Q);
6065     break;
6066   }
6067 
6068   /// If called on unreachable code, the above logic may report that the
6069   /// instruction simplified to itself.  Make life easier for users by
6070   /// detecting that case here, returning a safe value instead.
6071   return Result == I ? UndefValue::get(I->getType()) : Result;
6072 }
6073 
6074 /// Implementation of recursive simplification through an instruction's
6075 /// uses.
6076 ///
6077 /// This is the common implementation of the recursive simplification routines.
6078 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
6079 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
6080 /// instructions to process and attempt to simplify it using
6081 /// InstructionSimplify. Recursively visited users which could not be
6082 /// simplified themselves are to the optional UnsimplifiedUsers set for
6083 /// further processing by the caller.
6084 ///
6085 /// This routine returns 'true' only when *it* simplifies something. The passed
6086 /// in simplified value does not count toward this.
6087 static bool replaceAndRecursivelySimplifyImpl(
6088     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
6089     const DominatorTree *DT, AssumptionCache *AC,
6090     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
6091   bool Simplified = false;
6092   SmallSetVector<Instruction *, 8> Worklist;
6093   const DataLayout &DL = I->getModule()->getDataLayout();
6094 
6095   // If we have an explicit value to collapse to, do that round of the
6096   // simplification loop by hand initially.
6097   if (SimpleV) {
6098     for (User *U : I->users())
6099       if (U != I)
6100         Worklist.insert(cast<Instruction>(U));
6101 
6102     // Replace the instruction with its simplified value.
6103     I->replaceAllUsesWith(SimpleV);
6104 
6105     // Gracefully handle edge cases where the instruction is not wired into any
6106     // parent block.
6107     if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
6108         !I->mayHaveSideEffects())
6109       I->eraseFromParent();
6110   } else {
6111     Worklist.insert(I);
6112   }
6113 
6114   // Note that we must test the size on each iteration, the worklist can grow.
6115   for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
6116     I = Worklist[Idx];
6117 
6118     // See if this instruction simplifies.
6119     SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
6120     if (!SimpleV) {
6121       if (UnsimplifiedUsers)
6122         UnsimplifiedUsers->insert(I);
6123       continue;
6124     }
6125 
6126     Simplified = true;
6127 
6128     // Stash away all the uses of the old instruction so we can check them for
6129     // recursive simplifications after a RAUW. This is cheaper than checking all
6130     // uses of To on the recursive step in most cases.
6131     for (User *U : I->users())
6132       Worklist.insert(cast<Instruction>(U));
6133 
6134     // Replace the instruction with its simplified value.
6135     I->replaceAllUsesWith(SimpleV);
6136 
6137     // Gracefully handle edge cases where the instruction is not wired into any
6138     // parent block.
6139     if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
6140         !I->mayHaveSideEffects())
6141       I->eraseFromParent();
6142   }
6143   return Simplified;
6144 }
6145 
6146 bool llvm::replaceAndRecursivelySimplify(
6147     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
6148     const DominatorTree *DT, AssumptionCache *AC,
6149     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
6150   assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
6151   assert(SimpleV && "Must provide a simplified value.");
6152   return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
6153                                            UnsimplifiedUsers);
6154 }
6155 
6156 namespace llvm {
6157 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
6158   auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
6159   auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
6160   auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
6161   auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
6162   auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
6163   auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
6164   return {F.getParent()->getDataLayout(), TLI, DT, AC};
6165 }
6166 
6167 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
6168                                          const DataLayout &DL) {
6169   return {DL, &AR.TLI, &AR.DT, &AR.AC};
6170 }
6171 
6172 template <class T, class... TArgs>
6173 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
6174                                          Function &F) {
6175   auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
6176   auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
6177   auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
6178   return {F.getParent()->getDataLayout(), TLI, DT, AC};
6179 }
6180 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
6181                                                   Function &);
6182 }
6183