1 //===- InstCombineMulDivRem.cpp -------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
10 // srem, urem, frem.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/IR/Constant.h"
21 #include "llvm/IR/Constants.h"
22 #include "llvm/IR/InstrTypes.h"
23 #include "llvm/IR/Instruction.h"
24 #include "llvm/IR/Instructions.h"
25 #include "llvm/IR/IntrinsicInst.h"
26 #include "llvm/IR/Intrinsics.h"
27 #include "llvm/IR/Operator.h"
28 #include "llvm/IR/PatternMatch.h"
29 #include "llvm/IR/Type.h"
30 #include "llvm/IR/Value.h"
31 #include "llvm/Support/Casting.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/KnownBits.h"
34 #include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
35 #include "llvm/Transforms/InstCombine/InstCombiner.h"
36 #include "llvm/Transforms/Utils/BuildLibCalls.h"
37 #include <cassert>
38 #include <cstddef>
39 #include <cstdint>
40 #include <utility>
41 
42 using namespace llvm;
43 using namespace PatternMatch;
44 
45 #define DEBUG_TYPE "instcombine"
46 
/// The specific integer value is used in a context where it is known to be
/// non-zero.  If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
///
/// \param V    the value known to be non-zero in the context of \p CxtI.
/// \param IC   the InstCombine pass state (builder, analyses, worklist).
/// \param CxtI the instruction providing the non-zero context.
/// \returns a simplified replacement for V (possibly V itself when only its
///          flags were strengthened in place), or null if nothing changed.
static Value *simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to determine
  // if this is safe.  For example, the use could be in dynamically unreached
  // code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact.  Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is a
    // non-zero context as well.
    // Recurse: the shift's input inherits the non-zero context.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      IC.replaceOperand(*I, 0, V2);
      MadeChange = true;
    }

    // A power-of-two lshr that produced a non-zero value cannot have
    // shifted out the set bit, so the shift is exact.
    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    // Likewise a shl of a power of two that stayed non-zero cannot have
    // wrapped (unsigned).
    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //    If V is a phi node, we can call this on each of its operands.
  //    "select cond, X, 0" can simplify to "X".

  // When we only strengthened flags in place, hand V back so the caller
  // re-installs it as its operand (and requeues users).
  return MadeChange ? V : nullptr;
}
97 
98 /// A helper routine of InstCombiner::visitMul().
99 ///
100 /// If C is a scalar/fixed width vector of known powers of 2, then this
101 /// function returns a new scalar/fixed width vector obtained from logBase2
102 /// of C.
103 /// Return a null pointer otherwise.
104 static Constant *getLogBase2(Type *Ty, Constant *C) {
105   const APInt *IVal;
106   if (match(C, m_APInt(IVal)) && IVal->isPowerOf2())
107     return ConstantInt::get(Ty, IVal->logBase2());
108 
109   // FIXME: We can extract pow of 2 of splat constant for scalable vectors.
110   if (!isa<FixedVectorType>(Ty))
111     return nullptr;
112 
113   SmallVector<Constant *, 4> Elts;
114   for (unsigned I = 0, E = cast<FixedVectorType>(Ty)->getNumElements(); I != E;
115        ++I) {
116     Constant *Elt = C->getAggregateElement(I);
117     if (!Elt)
118       return nullptr;
119     if (isa<UndefValue>(Elt)) {
120       Elts.push_back(UndefValue::get(Ty->getScalarType()));
121       continue;
122     }
123     if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
124       return nullptr;
125     Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2()));
126   }
127 
128   return ConstantVector::get(Elts);
129 }
130 
// TODO: This is a specific form of a much more general pattern.
//       We could detect a select with any binop identity constant, or we
//       could use SimplifyBinOp to see if either arm of the select reduces.
//       But that needs to be done carefully and/or while removing potential
//       reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
/// Fold a multiply (integer or FP) by a one-use select of +1/-1 into a
/// select between the other operand and its negation.
/// Returns the replacement value, or null if the pattern does not match.
static Value *foldMulSelectToNegate(BinaryOperator &I,
                                    InstCombiner::BuilderTy &Builder) {
  Value *Cond, *OtherOp;

  // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
  // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
                        m_Value(OtherOp))))
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateNeg(OtherOp));

  // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
  // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
                        m_Value(OtherOp))))
    return Builder.CreateSelect(Cond, Builder.CreateNeg(OtherOp), OtherOp);

  // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
  // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(1.0),
                                           m_SpecificFP(-1.0))),
                         m_Value(OtherOp)))) {
    // Propagate the fmul's fast-math flags onto the new FP instructions.
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateFNeg(OtherOp));
  }

  // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
  // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(-1.0),
                                           m_SpecificFP(1.0))),
                         m_Value(OtherOp)))) {
    // Propagate the fmul's fast-math flags onto the new FP instructions.
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, Builder.CreateFNeg(OtherOp), OtherOp);
  }

  return nullptr;
}
174 
/// Visitor for integer 'mul': run simplification, then a sequence of
/// canonicalizing transforms. Returns a replacement instruction, &I when I
/// was modified in place, or null when nothing changed. NOTE: the transforms
/// are ordered; earlier ones are preferred canonical forms.
Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
  // Constant folding and trivial simplifications first.
  if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // X * -1 == 0 - X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_AllOnes())) {
    BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
    // nsw mul by -1 implies nsw negation (only INT_MIN would overflow,
    // and nsw on the mul already excludes that).
    if (I.hasNoSignedWrap())
      BO->setHasNoSignedWrap();
    return BO;
  }

  // Also allow combining multiply instructions on vectors.
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      // Wrap flags transfer only when both original instructions had them.
      if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
          Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
      if (Constant *NewCst = getLogBase2(NewOp->getType(), C1)) {
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (I.hasNoUnsignedWrap())
          Shl->setHasNoUnsignedWrap();
        if (I.hasNoSignedWrap()) {
          const APInt *V;
          // A shift by bitwidth-1 moves into the sign bit; nsw does not
          // transfer in that case.
          if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    // (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
    // (Y + const) * (-(2**n)) -> (-constY) * (2**n), for positive nonzero n
    // The "* (2**n)" thus becomes a potential shifting opportunity.
    {
      const APInt &   Val = CI->getValue();
      const APInt &PosVal = Val.abs();
      if (Val.isNegative() && PosVal.isPowerOf2()) {
        Value *X = nullptr, *Y = nullptr;
        if (Op0->hasOneUse()) {
          ConstantInt *C1;
          Value *Sub = nullptr;
          if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
            Sub = Builder.CreateSub(X, Y, "suba");
          else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
            Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
          if (Sub)
            return
              BinaryOperator::CreateMul(Sub,
                                        ConstantInt::get(Y->getType(), PosVal));
        }
      }
    }
  }

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  // Simplify mul instructions with a constant RHS.
  if (isa<Constant>(Op1)) {
    // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
    Value *X;
    Constant *C1;
    if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
      Value *Mul = Builder.CreateMul(C1, Op1);
      // Only go forward with the transform if C1*CI simplifies to a tidier
      // constant.
      if (!match(Mul, m_Mul(m_Value(), m_Value())))
        return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
    }
  }

  // abs(X) * abs(X) -> X * X
  // nabs(X) * nabs(X) -> X * X
  if (Op0 == Op1) {
    Value *X, *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return BinaryOperator::CreateMul(X, X);
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    // nsw transfers only when the mul and both negations are nsw.
    if (I.hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // -X * Y --> -(X * Y)
  // X * -Y --> -(X * Y)
  if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));

  // (X / Y) *  Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    // Allow the div on either operand: if Op0 is not a [su]div, look at Op1.
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(X, Rem);
      return BinaryOperator::CreateSub(Rem, X);
    }
  }

  /// i1 mul -> i1 and.
  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(Op0, Op1);

  // X*(1 << Y) --> X << Y
  // (1 << Y)*X --> X << Y
  {
    Value *Y;
    BinaryOperator *BO = nullptr;
    bool ShlNSW = false;
    if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op1, Y);
      ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
    } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op0, Y);
      ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
    }
    if (BO) {
      if (I.hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      // nsw needs both the mul and the original shl to be nsw.
      if (I.hasNoSignedWrap() && ShlNSW)
        BO->setHasNoSignedWrap();
      return BO;
    }
  }

  // (zext bool X) * (zext bool Y) --> zext (and X, Y)
  // (sext bool X) * (sext bool Y) --> zext (and X, Y)
  // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
  if (((match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_SExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::ZExt, And, I.getType());
  }
  // (sext bool X) * (zext bool Y) --> sext (and X, Y)
  // (zext bool X) * (sext bool Y) --> sext (and X, Y)
  // Note: -1 * 1 == 1 * -1  == -1
  if (((match(Op0, m_SExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::SExt, And, I.getType());
  }

  // (bool X) * Y --> X ? Y : 0
  // Y * (bool X) --> X ? Y : 0
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));

  // (lshr X, 31) * Y --> (ashr X, 31) & Y
  // Y * (lshr X, 31) --> (ashr X, 31) & Y
  // TODO: We are not checking one-use because the elimination of the multiply
  //       is better for analysis?
  // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
  //       more similar to what we're doing above.
  const APInt *C;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
  if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  // Finally, try to strengthen wrap flags in place when overflow analysis
  // proves they hold; returning &I signals an in-place modification.
  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}
426 
427 Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
428   BinaryOperator::BinaryOps Opcode = I.getOpcode();
429   assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
430          "Expected fmul or fdiv");
431 
432   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
433   Value *X, *Y;
434 
435   // -X * -Y --> X * Y
436   // -X / -Y --> X / Y
437   if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
438     return BinaryOperator::CreateWithCopiedFlags(Opcode, X, Y, &I);
439 
440   // fabs(X) * fabs(X) -> X * X
441   // fabs(X) / fabs(X) -> X / X
442   if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))))
443     return BinaryOperator::CreateWithCopiedFlags(Opcode, X, X, &I);
444 
445   // fabs(X) * fabs(Y) --> fabs(X * Y)
446   // fabs(X) / fabs(Y) --> fabs(X / Y)
447   if (match(Op0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))) &&
448       match(Op1, m_Intrinsic<Intrinsic::fabs>(m_Value(Y))) &&
449       (Op0->hasOneUse() || Op1->hasOneUse())) {
450     IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
451     Builder.setFastMathFlags(I.getFastMathFlags());
452     Value *XY = Builder.CreateBinOp(Opcode, X, Y);
453     Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY);
454     Fabs->takeName(&I);
455     return replaceInstUsesWith(I, Fabs);
456   }
457 
458   return nullptr;
459 }
460 
461 Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
462   if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
463                                   I.getFastMathFlags(),
464                                   SQ.getWithInstruction(&I)))
465     return replaceInstUsesWith(I, V);
466 
467   if (SimplifyAssociativeOrCommutative(I))
468     return &I;
469 
470   if (Instruction *X = foldVectorBinop(I))
471     return X;
472 
473   if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
474     return FoldedMul;
475 
476   if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
477     return replaceInstUsesWith(I, FoldedMul);
478 
479   if (Instruction *R = foldFPSignBitOps(I))
480     return R;
481 
482   // X * -1.0 --> -X
483   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
484   if (match(Op1, m_SpecificFP(-1.0)))
485     return UnaryOperator::CreateFNegFMF(Op0, &I);
486 
487   // -X * C --> X * -C
488   Value *X, *Y;
489   Constant *C;
490   if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
491     return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);
492 
493   // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
494   if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
495     return replaceInstUsesWith(I, V);
496 
497   if (I.hasAllowReassoc()) {
498     // Reassociate constant RHS with another constant to form constant
499     // expression.
500     if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
501       Constant *C1;
502       if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
503         // (C1 / X) * C --> (C * C1) / X
504         Constant *CC1 = ConstantExpr::getFMul(C, C1);
505         if (CC1->isNormalFP())
506           return BinaryOperator::CreateFDivFMF(CC1, X, &I);
507       }
508       if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
509         // (X / C1) * C --> X * (C / C1)
510         Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
511         if (CDivC1->isNormalFP())
512           return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);
513 
514         // If the constant was a denormal, try reassociating differently.
515         // (X / C1) * C --> X / (C1 / C)
516         Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
517         if (Op0->hasOneUse() && C1DivC->isNormalFP())
518           return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
519       }
520 
521       // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
522       // canonicalized to 'fadd X, C'. Distributing the multiply may allow
523       // further folds and (X * C) + C2 is 'fma'.
524       if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
525         // (X + C1) * C --> (X * C) + (C * C1)
526         Constant *CC1 = ConstantExpr::getFMul(C, C1);
527         Value *XC = Builder.CreateFMulFMF(X, C, &I);
528         return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
529       }
530       if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
531         // (C1 - X) * C --> (C * C1) - (X * C)
532         Constant *CC1 = ConstantExpr::getFMul(C, C1);
533         Value *XC = Builder.CreateFMulFMF(X, C, &I);
534         return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
535       }
536     }
537 
538     Value *Z;
539     if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
540                            m_Value(Z)))) {
541       // Sink division: (X / Y) * Z --> (X * Z) / Y
542       Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
543       return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
544     }
545 
546     // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
547     // nnan disallows the possibility of returning a number if both operands are
548     // negative (in that case, we should return NaN).
549     if (I.hasNoNaNs() &&
550         match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
551         match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
552       Value *XY = Builder.CreateFMulFMF(X, Y, &I);
553       Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
554       return replaceInstUsesWith(I, Sqrt);
555     }
556 
557     // Like the similar transform in instsimplify, this requires 'nsz' because
558     // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
559     if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
560         Op0->hasNUses(2)) {
561       // Peek through fdiv to find squaring of square root:
562       // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
563       if (match(Op0, m_FDiv(m_Value(X),
564                             m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
565         Value *XX = Builder.CreateFMulFMF(X, X, &I);
566         return BinaryOperator::CreateFDivFMF(XX, Y, &I);
567       }
568       // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
569       if (match(Op0, m_FDiv(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y)),
570                             m_Value(X)))) {
571         Value *XX = Builder.CreateFMulFMF(X, X, &I);
572         return BinaryOperator::CreateFDivFMF(Y, XX, &I);
573       }
574     }
575 
576     // exp(X) * exp(Y) -> exp(X + Y)
577     // Match as long as at least one of exp has only one use.
578     if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
579         match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y))) &&
580         (Op0->hasOneUse() || Op1->hasOneUse())) {
581       Value *XY = Builder.CreateFAddFMF(X, Y, &I);
582       Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
583       return replaceInstUsesWith(I, Exp);
584     }
585 
586     // exp2(X) * exp2(Y) -> exp2(X + Y)
587     // Match as long as at least one of exp2 has only one use.
588     if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
589         match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y))) &&
590         (Op0->hasOneUse() || Op1->hasOneUse())) {
591       Value *XY = Builder.CreateFAddFMF(X, Y, &I);
592       Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
593       return replaceInstUsesWith(I, Exp2);
594     }
595 
596     // (X*Y) * X => (X*X) * Y where Y != X
597     //  The purpose is two-fold:
598     //   1) to form a power expression (of X).
599     //   2) potentially shorten the critical path: After transformation, the
600     //  latency of the instruction Y is amortized by the expression of X*X,
601     //  and therefore Y is in a "less critical" position compared to what it
602     //  was before the transformation.
603     if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
604         Op1 != Y) {
605       Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
606       return BinaryOperator::CreateFMulFMF(XX, Y, &I);
607     }
608     if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
609         Op0 != Y) {
610       Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
611       return BinaryOperator::CreateFMulFMF(XX, Y, &I);
612     }
613   }
614 
615   // log2(X * 0.5) * Y = log2(X) * Y - Y
616   if (I.isFast()) {
617     IntrinsicInst *Log2 = nullptr;
618     if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
619             m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
620       Log2 = cast<IntrinsicInst>(Op0);
621       Y = Op1;
622     }
623     if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
624             m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
625       Log2 = cast<IntrinsicInst>(Op1);
626       Y = Op0;
627     }
628     if (Log2) {
629       Value *Log2 = Builder.CreateUnaryIntrinsic(Intrinsic::log2, X, &I);
630       Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
631       return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
632     }
633   }
634 
635   return nullptr;
636 }
637 
/// Fold a divide or remainder with a select instruction divisor when one of the
/// select operands is zero. In that case, we can use the other select operand
/// because div/rem by zero is undefined.
///
/// \returns true if I was modified (its divisor operand was replaced, and
///          possibly other in-block uses of the select/condition as well).
bool InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
  SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
  if (!SI)
    return false;

  // Which select operand (1 = true value, 2 = false value) must be non-zero
  // for the div/rem to be defined.
  int NonNullOperand;
  if (match(SI->getTrueValue(), m_Zero()))
    // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
    NonNullOperand = 2;
  else if (match(SI->getFalseValue(), m_Zero()))
    // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
    NonNullOperand = 1;
  else
    return false;

  // Change the div/rem to use 'Y' instead of the select.
  replaceOperand(I, 1, SI->getOperand(NonNullOperand));

  // Okay, we know we replace the operand of the div/rem with 'Y' with no
  // problem.  However, the select, or the condition of the select may have
  // multiple uses.  Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition only have a single use, don't bother with this,
  // early exit.
  Value *SelectCond = SI->getCondition();
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
  Type *CondTy = SelectCond->getType();
  while (BBI != BBFront) {
    --BBI;
    // If we find an instruction that is not guaranteed to transfer execution
    // to its successor, information from below it cannot be propagated above
    // it, so stop scanning.
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    // NOTE: the op_iterator 'I' below intentionally shadows the function
    // parameter 'I' within this loop.
    for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
         I != E; ++I) {
      if (*I == SI) {
        replaceUse(*I, SI->getOperand(NonNullOperand));
        Worklist.push(&*BBI);
      } else if (*I == SelectCond) {
        replaceUse(*I, NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
                                           : ConstantInt::getFalse(CondTy));
        Worklist.push(&*BBI);
      }
    }

    // Once we pass the definition itself, stop looking for its uses.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;

  }
  return true;
}
707 
708 /// True if the multiply can not be expressed in an int this size.
709 static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
710                               bool IsSigned) {
711   bool Overflow;
712   Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
713   return Overflow;
714 }
715 
716 /// True if C1 is a multiple of C2. Quotient contains C1/C2.
717 static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
718                        bool IsSigned) {
719   assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");
720 
721   // Bail if we will divide by zero.
722   if (C2.isNullValue())
723     return false;
724 
725   // Bail if we would divide INT_MIN by -1.
726   if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
727     return false;
728 
729   APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
730   if (IsSigned)
731     APInt::sdivrem(C1, C2, Quotient, Remainder);
732   else
733     APInt::udivrem(C1, C2, Quotient, Remainder);
734 
735   return Remainder.isMinValue();
736 }
737 
/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// Common integer divide transforms
/// Returns a replacement instruction, &I if I was mutated in place, or null
/// if no common transform applied.
Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2  -> X / (C1*C2)
    // Only valid when C1*C2 does not overflow, otherwise the combined
    // divisor would be wrong.
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    // A no-wrap multiply feeding the dividend lets us cancel common factors.
    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        // Propagate no-wrap flags from the original multiply where they
        // remain valid for the new multiply.
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    // Same idea with a no-wrap shl, i.e. a multiply by a power of 2.
    // (The signed case excludes a shift of bitwidth-1: 1 << (BW-1) is the
    // sign mask, a negative value.)
    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         *C1 != C1->getBitWidth() - 1) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if (!C2->isNullValue()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
      // result is one, if Op1 is -1 then the result is minus one, otherwise
      // it's zero.
      // Op1+1 u< 3 holds exactly for Op1 in {-1, 0, 1}; in that range the
      // result equals Op1 itself (the 0 case is UB anyway).
      Value *Inc = Builder.CreateAdd(Op1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

  // See if we can fold away this div instruction.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  // The no-wrap flag on the shl justifies dividing it back out.
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      replaceOperand(I, 0, ConstantInt::get(Ty, 1));
      replaceOperand(I, 1, Y);
      return &I;
    }
  }

  return nullptr;
}
871 
/// Recursion cap for visitUDivOperand() when looking through selects.
static const unsigned MaxDepth = 6;
873 
namespace {

// Callback type used by visitUDivOperand()/visitUDiv(): given the udiv's
// operands, produce the replacement instruction for one fold opportunity.
using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
                                           const BinaryOperator &I,
                                           InstCombinerImpl &IC);

/// Used to maintain state for visitUDivOperand().
struct UDivFoldAction {
  /// Informs visitUDiv() how to fold this operand.  This can be zero if this
  /// action joins two actions together.
  FoldUDivOperandCb FoldAction;

  /// Which operand to fold.
  Value *OperandToFold;

  // Only one member of the union is meaningful at a time, selected by
  // whether FoldAction is null (join action) or non-null (fold action).
  union {
    /// The instruction returned when FoldAction is invoked.
    Instruction *FoldResult;

    /// Stores the LHS action index if this action joins two actions together.
    size_t SelectLHSIdx;
  };

  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
      : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
      : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};

} // end anonymous namespace
904 
905 // X udiv 2^C -> X >> C
906 static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
907                                     const BinaryOperator &I,
908                                     InstCombinerImpl &IC) {
909   Constant *C1 = getLogBase2(Op0->getType(), cast<Constant>(Op1));
910   if (!C1)
911     llvm_unreachable("Failed to constant fold udiv -> logbase2");
912   BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1);
913   if (I.isExact())
914     LShr->setIsExact();
915   return LShr;
916 }
917 
918 // X udiv (C1 << N), where C1 is "1<<C2"  -->  X >> (N+C2)
919 // X udiv (zext (C1 << N)), where C1 is "1<<C2"  -->  X >> (N+C2)
920 static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
921                                 InstCombinerImpl &IC) {
922   Value *ShiftLeft;
923   if (!match(Op1, m_ZExt(m_Value(ShiftLeft))))
924     ShiftLeft = Op1;
925 
926   Constant *CI;
927   Value *N;
928   if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N))))
929     llvm_unreachable("match should never fail here!");
930   Constant *Log2Base = getLogBase2(N->getType(), CI);
931   if (!Log2Base)
932     llvm_unreachable("getLogBase2 should never fail here!");
933   N = IC.Builder.CreateAdd(N, Log2Base);
934   if (Op1 != ShiftLeft)
935     N = IC.Builder.CreateZExt(N, Op1->getType());
936   BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
937   if (I.isExact())
938     LShr->setIsExact();
939   return LShr;
940 }
941 
// Recursively visits the possible right hand operands of a udiv
// instruction, seeing through select instructions, to determine if we can
// replace the udiv with something simpler.  If we find that an operand is not
// able to simplify the udiv, we abort the entire transformation.
// Returns the number of actions accumulated so far (one past the index of
// the action pushed for this operand), or 0 if no fold is possible.
static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
                               SmallVectorImpl<UDivFoldAction> &Actions,
                               unsigned Depth = 0) {
  // Check to see if this is an unsigned division with an exact power of 2,
  // if so, convert to a right shift.
  if (match(Op1, m_Power2())) {
    Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1));
    return Actions.size();
  }

  // X udiv (C1 << N), where C1 is "1<<C2"  -->  X >> (N+C2)
  if (match(Op1, m_Shl(m_Power2(), m_Value())) ||
      match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) {
    Actions.push_back(UDivFoldAction(foldUDivShl, Op1));
    return Actions.size();
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return 0;

  // Look through a select: both arms must be foldable, then push a null
  // "join" action recording the index of the LHS arm's action.
  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
    if (size_t LHSIdx =
            visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth))
      if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) {
        Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1));
        return Actions.size();
      }

  return 0;
}
977 
978 /// If we have zero-extended operands of an unsigned div or rem, we may be able
979 /// to narrow the operation (sink the zext below the math).
980 static Instruction *narrowUDivURem(BinaryOperator &I,
981                                    InstCombiner::BuilderTy &Builder) {
982   Instruction::BinaryOps Opcode = I.getOpcode();
983   Value *N = I.getOperand(0);
984   Value *D = I.getOperand(1);
985   Type *Ty = I.getType();
986   Value *X, *Y;
987   if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
988       X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
989     // udiv (zext X), (zext Y) --> zext (udiv X, Y)
990     // urem (zext X), (zext Y) --> zext (urem X, Y)
991     Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
992     return new ZExtInst(NarrowOp, Ty);
993   }
994 
995   Constant *C;
996   if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
997       (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
998     // If the constant is the same in the smaller type, use the narrow version.
999     Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
1000     if (ConstantExpr::getZExt(TruncC, Ty) != C)
1001       return nullptr;
1002 
1003     // udiv (zext X), C --> zext (udiv X, C')
1004     // urem (zext X), C --> zext (urem X, C')
1005     // udiv C, (zext X) --> zext (udiv C', X)
1006     // urem C, (zext X) --> zext (urem C', X)
1007     Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
1008                                        : Builder.CreateBinOp(Opcode, TruncC, X);
1009     return new ZExtInst(NarrowOp, Ty);
1010   }
1011 
1012   return nullptr;
1013 }
1014 
/// Fold an unsigned division. Tries generic simplification, the common
/// integer-division transforms, udiv-specific folds, and finally the
/// select-aware shift-conversion machinery (visitUDivOperand).
Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
  if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  const APInt *C1, *C2;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
    // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
    // Only valid when C2 << C1 does not overflow.
    bool Overflow;
    APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
    if (!Overflow) {
      // The result is exact only if both the original lshr and udiv were.
      bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
      BinaryOperator *BO = BinaryOperator::CreateUDiv(
          X, ConstantInt::get(X->getType(), C2ShlC1));
      if (IsExact)
        BO->setIsExact();
      return BO;
    }
  }

  // Op0 / C where C is large (negative) --> zext (Op0 >= C)
  // TODO: Could use isKnownNegative() to handle non-constant values.
  Type *Ty = I.getType();
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }
  // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
    return NarrowDiv;

  // If the udiv operands are non-overflowing multiplies with a common operand,
  // then eliminate the common factor:
  // (A * B) / (A * X) --> B / X (and commuted variants)
  // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
  // TODO: If -reassociation handled this generally, we could remove this.
  Value *A, *B;
  if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
    if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
      return BinaryOperator::CreateUDiv(B, X);
    if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
      return BinaryOperator::CreateUDiv(A, X);
  }

  // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
  // Replay the actions recorded by visitUDivOperand in order; non-null
  // actions create a folded instruction, null actions join the two most
  // recently produced results under the select's condition.
  SmallVector<UDivFoldAction, 6> UDivActions;
  if (visitUDivOperand(Op0, Op1, I, UDivActions))
    for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) {
      FoldUDivOperandCb Action = UDivActions[i].FoldAction;
      Value *ActionOp1 = UDivActions[i].OperandToFold;
      Instruction *Inst;
      if (Action)
        Inst = Action(Op0, ActionOp1, I, *this);
      else {
        // This action joins two actions together.  The RHS of this action is
        // simply the last action we processed, we saved the LHS action index in
        // the joining action.
        size_t SelectRHSIdx = i - 1;
        Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult;
        size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx;
        Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult;
        Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(),
                                  SelectLHS, SelectRHS);
      }

      // If this is the last action to process, return it to the InstCombiner.
      // Otherwise, we insert it before the UDiv and record it so that we may
      // use it as part of a joining action (i.e., a SelectInst).
      if (e - i != 1) {
        Inst->insertBefore(&I);
        UDivActions[i].FoldResult = Inst;
      } else
        return Inst;
    }

  return nullptr;
}
1108 
/// Fold a signed division. Tries generic simplification, the common
/// integer-division transforms, then sdiv-specific folds, including
/// conversion to udiv when the sign bits are known clear.
Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
  if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  // sdiv Op0, -1 --> -Op0
  // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
  if (match(Op1, m_AllOnes()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return BinaryOperator::CreateNeg(Op0);

  // X / INT_MIN --> X == INT_MIN
  if (match(Op1, m_SignMask()))
    return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType());

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // sdiv exact X, C  -->  ashr exact X, log2(C)
    if (I.isExact() && Op1C->isNonNegative() && Op1C->isPowerOf2()) {
      Value *ShAmt = ConstantInt::get(Op1->getType(), Op1C->exactLogBase2());
      return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName());
    }

    // If the dividend is sign-extended and the constant divisor is small enough
    // to fit in the source type, shrink the division to the narrower type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.

      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Op0->getType());
    }

    // -X / C --> X / -C (if the negation doesn't overflow).
    // TODO: This could be enhanced to handle arbitrary vector constants by
    //       checking if all elements are not the min-signed-val.
    if (!Op1C->isMinSignedValue() &&
        match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
      Constant *NegC = ConstantInt::get(I.getType(), -(*Op1C));
      Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // -X / Y --> -(X / Y)
  // The nsw on the subtract guarantees the negation cannot wrap.
  Value *Y;
  if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(
        Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
    if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
      // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (match(Op1, m_NegatedPower2())) {
      // X sdiv (-(1 << C)) -> -(X sdiv (1 << C)) ->
      //                    -> -(X udiv (1 << C)) -> -(X u>> C)
      return BinaryOperator::CreateNeg(Builder.Insert(foldUDivPow2Cst(
          Op0, ConstantExpr::getNeg(cast<Constant>(Op1)), I, *this)));
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}
1207 
1208 /// Remove negation and try to convert division into multiplication.
1209 static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
1210   Constant *C;
1211   if (!match(I.getOperand(1), m_Constant(C)))
1212     return nullptr;
1213 
1214   // -X / C --> X / -C
1215   Value *X;
1216   if (match(I.getOperand(0), m_FNeg(m_Value(X))))
1217     return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);
1218 
1219   // If the constant divisor has an exact inverse, this is always safe. If not,
1220   // then we can still create a reciprocal if fast-math-flags allow it and the
1221   // constant is a regular number (not zero, infinite, or denormal).
1222   if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
1223     return nullptr;
1224 
1225   // Disallow denormal constants because we don't know what would happen
1226   // on all targets.
1227   // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1228   // denorms are flushed?
1229   auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
1230   if (!RecipC->isNormalFP())
1231     return nullptr;
1232 
1233   // X / C --> X * (1 / C)
1234   return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
1235 }
1236 
1237 /// Remove negation and try to reassociate constant math.
1238 static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
1239   Constant *C;
1240   if (!match(I.getOperand(0), m_Constant(C)))
1241     return nullptr;
1242 
1243   // C / -X --> -C / X
1244   Value *X;
1245   if (match(I.getOperand(1), m_FNeg(m_Value(X))))
1246     return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);
1247 
1248   if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
1249     return nullptr;
1250 
1251   // Try to reassociate C / X expressions where X includes another constant.
1252   Constant *C2, *NewC = nullptr;
1253   if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
1254     // C / (X * C2) --> (C / C2) / X
1255     NewC = ConstantExpr::getFDiv(C, C2);
1256   } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
1257     // C / (X / C2) --> (C * C2) / X
1258     NewC = ConstantExpr::getFMul(C, C2);
1259   }
1260   // Disallow denormal constants because we don't know what would happen
1261   // on all targets.
1262   // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1263   // denorms are flushed?
1264   if (!NewC || !NewC->isNormalFP())
1265     return nullptr;
1266 
1267   return BinaryOperator::CreateFDivFMF(NewC, X, &I);
1268 }
1269 
/// Fold a floating-point division. Most folds here are gated on specific
/// fast-math flags of the instruction.
Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
  if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  // If either operand is a constant and the other a select, try to push the
  // division into the select's arms.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
    // Z / (1.0 / Y) => (Y * Z)
    //
    // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
    // m_OneUse check is avoided because even in the case of the multiple uses
    // for 1.0/Y, the number of instructions remain the same and a division is
    // replaced by a multiplication.
    if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
      return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
                  match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) &&
        hasFloatFn(&TLI, I.getType(), LibFunc_tan, LibFunc_tanf, LibFunc_tanl)) {
      // Emit the tan libcall with the fdiv's fast-math flags applied.
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs =
          cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociate to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
  Value *X, *Y;
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
    replaceOperand(I, 1, Y);
    return &I;
  }

  // X / fabs(X) -> copysign(1.0, X)
  // fabs(X) / X -> copysign(1.0, X)
  if (I.hasNoNaNs() && I.hasNoInfs() &&
      (match(&I,
             m_FDiv(m_Value(X), m_Intrinsic<Intrinsic::fabs>(m_Deferred(X)))) ||
       match(&I, m_FDiv(m_Intrinsic<Intrinsic::fabs>(m_Value(X)),
                        m_Deferred(X))))) {
    Value *V = Builder.CreateBinaryIntrinsic(
        Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
    return replaceInstUsesWith(I, V);
  }
  return nullptr;
}
1372 
/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// Common integer remainder transforms
/// Returns a replacement instruction, &I if I was mutated in place, or null
/// if no common transform applied.
Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        // Require a divisor that is neither 0 nor (for srem) the value whose
        // division by -1 would trap, so speculation is safe.
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return nullptr;
}
1414 
/// Fold an unsigned remainder. Tries generic simplification, the common
/// remainder transforms, then urem-specific folds.
Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
  if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2,
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    // This may increase instruction count, we don't enforce that Y is a
    // constant.
    Constant *N1 = Constant::getAllOnesValue(Ty);
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }

  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One())) {
    Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  // X urem C -> X < C ? X : X - C, where C >= signbit.
  // With such a large C, at most one subtraction is needed.
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
    Value *Sub = Builder.CreateSub(Op0, Op1);
    return SelectInst::Create(Cmp, Op0, Sub);
  }

  // If the divisor is a sext of a boolean, then the divisor must be max
  // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
  // max unsigned value. In that case, the remainder is 0:
  // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
  }

  return nullptr;
}
1465 
/// Fold a signed remainder. Tries generic simplification, the common
/// remainder transforms, then srem-specific folds (sign-based conversions
/// and canonicalizing negative constant divisors to positive ones).
Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
  if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  {
    const APInt *Y;
    // X % -Y -> X % Y
    // (Skip INT_MIN, whose negation is itself.)
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
      return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
  }

  // -X srem Y --> -(X srem Y)
  Value *X, *Y;
  if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = cast<VectorType>(C->getType())->getNumElements();

    // First scan: detect whether any lane is negative and whether any lane
    // is missing (null aggregate element), which would block the rewrite.
    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }

      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      // Second pass: build a new divisor vector with negative lanes negated.
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i);  // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C)  // Don't loop on -MININT
        return replaceOperand(I, 1, NewRHSV);
    }
  }

  return nullptr;
}
1537 
1538 Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
1539   if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
1540                                   I.getFastMathFlags(),
1541                                   SQ.getWithInstruction(&I)))
1542     return replaceInstUsesWith(I, V);
1543 
1544   if (Instruction *X = foldVectorBinop(I))
1545     return X;
1546 
1547   return nullptr;
1548 }
1549