//===- InstCombineMulDivRem.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
// srem, urem, frem.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to determine
  // if this is safe. For example, the use could be in dynamically unreached
  // code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact. Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is a
    // non-zero context as well.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      IC.replaceOperand(*I, 0, V2);
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //    If V is a phi node, we can call this on each of its operands.
  //    "select cond, X, 0" can simplify to "X".

  return MadeChange ? V : nullptr;
}

// TODO: This is a specific form of a much more general pattern.
// We could detect a select with any binop identity constant, or we
// could use SimplifyBinOp to see if either arm of the select reduces.
// But that needs to be done carefully and/or while removing potential
// reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
static Value *foldMulSelectToNegate(BinaryOperator &I,
                                    InstCombiner::BuilderTy &Builder) {
  Value *Cond, *OtherOp;

  // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
  // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
                        m_Value(OtherOp))))
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateNeg(OtherOp));

  // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
  // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
                        m_Value(OtherOp))))
    return Builder.CreateSelect(Cond, Builder.CreateNeg(OtherOp), OtherOp);

  // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
  // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(1.0),
                                           m_SpecificFP(-1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateFNeg(OtherOp));
  }

  // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
  // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(-1.0),
                                           m_SpecificFP(1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, Builder.CreateFNeg(OtherOp), OtherOp);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
  if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // X * -1 == 0 - X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_AllOnes())) {
    BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
    if (I.hasNoSignedWrap())
      BO->setHasNoSignedWrap();
    return BO;
  }

  // Also allow combining multiply instructions on vectors.
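  // The braced block below folds the constant-operand cases:
  // ((X << C2) * C1) becomes X * (C1 << C2), and X * (2^C) becomes X << C,
  // which also covers splatted vector constants.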
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
          Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
      if (Constant *NewCst = ConstantExpr::getExactLogBase2(C1)) {
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (I.hasNoUnsignedWrap())
          Shl->setHasNoUnsignedWrap();
        if (I.hasNoSignedWrap()) {
          const APInt *V;
          if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (Op0->hasOneUse() && match(Op1, m_NegatedPower2())) {
    // Interpret X * (-1<<C) as (-X) * (1<<C) and try to sink the negation.
    // The "* (1<<C)" thus becomes a potential shifting opportunity.
    if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ true, Op0, *this))
      return BinaryOperator::CreateMul(
          NegOp0, ConstantExpr::getNeg(cast<Constant>(Op1)), I.getName());
  }

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  // Simplify mul instructions with a constant RHS.
  if (isa<Constant>(Op1)) {
    // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
    Value *X;
    Constant *C1;
    if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
      Value *Mul = Builder.CreateMul(C1, Op1);
      // Only go forward with the transform if C1*CI simplifies to a tidier
      // constant.
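      // (If the product did not fold to a simple constant, the match below
      // still sees a mul and we skip the transform rather than trade one
      // multiply for two.)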
      if (!match(Mul, m_Mul(m_Value(), m_Value())))
        return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
    }
  }

  // abs(X) * abs(X) -> X * X
  // nabs(X) * nabs(X) -> X * X
  if (Op0 == Op1) {
    Value *X, *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return BinaryOperator::CreateMul(X, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return BinaryOperator::CreateMul(X, X);
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    if (I.hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // -X * Y --> -(X * Y)
  // X * -Y --> -(X * Y)
  if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));

  // (X / Y) * Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(X, Rem);
      return BinaryOperator::CreateSub(Rem, X);
    }
  }

  // i1 mul -> i1 and.
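  // (In i1, the only possible products are 0*0, 0*1, and 1*1, which is exactly
  // the truth table of 'and'.)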
  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(Op0, Op1);

  // X*(1 << Y) --> X << Y
  // (1 << Y)*X --> X << Y
  {
    Value *Y;
    BinaryOperator *BO = nullptr;
    bool ShlNSW = false;
    if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op1, Y);
      ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
    } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op0, Y);
      ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
    }
    if (BO) {
      if (I.hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && ShlNSW)
        BO->setHasNoSignedWrap();
      return BO;
    }
  }

  // (zext bool X) * (zext bool Y) --> zext (and X, Y)
  // (sext bool X) * (sext bool Y) --> zext (and X, Y)
  // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
  if (((match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_SExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::ZExt, And, I.getType());
  }
  // (sext bool X) * (zext bool Y) --> sext (and X, Y)
  // (zext bool X) * (sext bool Y) --> sext (and X, Y)
  // Note: -1 * 1 == 1 * -1 == -1
  if (((match(Op0, m_SExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::SExt, And, I.getType());
  }

  // (bool X) * Y --> X ? Y : 0
  // Y * (bool X) --> X ? Y : 0
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));

  // (lshr X, 31) * Y --> (ashr X, 31) & Y
  // Y * (lshr X, 31) --> (ashr X, 31) & Y
  // TODO: We are not checking one-use because the elimination of the multiply
  // is better for analysis?
  // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
  // more similar to what we're doing above.
  const APInt *C;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
  if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
  BinaryOperator::BinaryOps Opcode = I.getOpcode();
  assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
         "Expected fmul or fdiv");

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y;

  // -X * -Y --> X * Y
  // -X / -Y --> X / Y
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, Y, &I);

  // fabs(X) * fabs(X) -> X * X
  // fabs(X) / fabs(X) -> X / X
  if (Op0 == Op1 && match(Op0, m_FAbs(m_Value(X))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, X, &I);

  // fabs(X) * fabs(Y) --> fabs(X * Y)
  // fabs(X) / fabs(Y) --> fabs(X / Y)
  if (match(Op0, m_FAbs(m_Value(X))) && match(Op1, m_FAbs(m_Value(Y))) &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    Value *XY = Builder.CreateBinOp(Opcode, X, Y);
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY);
    Fabs->takeName(&I);
    return replaceInstUsesWith(I, Fabs);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
  if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  // X * -1.0 --> -X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_SpecificFP(-1.0)))
    return UnaryOperator::CreateFNegFMF(Op0, &I);

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *C;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
    return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);

  // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc()) {
    // Reassociate constant RHS with another constant to form constant
    // expression.
    if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
      Constant *C1;
      if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
        // (C1 / X) * C --> (C * C1) / X
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        if (CC1->isNormalFP())
          return BinaryOperator::CreateFDivFMF(CC1, X, &I);
      }
      if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
        // (X / C1) * C --> X * (C / C1)
        Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
        if (CDivC1->isNormalFP())
          return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);

        // If the constant was a denormal, try reassociating differently.
        // (X / C1) * C --> X / (C1 / C)
        Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
        if (Op0->hasOneUse() && C1DivC->isNormalFP())
          return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
      }

      // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
      // canonicalized to 'fadd X, C'. Distributing the multiply may allow
      // further folds and (X * C) + C2 is 'fma'.
      if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
        // (X + C1) * C --> (X * C) + (C * C1)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
      }
      if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
        // (C1 - X) * C --> (C * C1) - (X * C)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
      }
    }

    Value *Z;
    if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
                           m_Value(Z)))) {
      // Sink division: (X / Y) * Z --> (X * Z) / Y
      Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
      return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
    }

    // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
    // nnan disallows the possibility of returning a number if both operands are
    // negative (in that case, we should return NaN).
    if (I.hasNoNaNs() &&
        match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
        match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
      Value *XY = Builder.CreateFMulFMF(X, Y, &I);
      Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
      return replaceInstUsesWith(I, Sqrt);
    }

    // The following transforms are done irrespective of the number of uses
    // for the expression "1.0/sqrt(X)".
    // 1) 1.0/sqrt(X) * X -> X/sqrt(X)
    // 2) X * 1.0/sqrt(X) -> X/sqrt(X)
    // We always expect the backend to reduce X/sqrt(X) to sqrt(X), if it
    // has the necessary (reassoc) fast-math-flags.
    if (I.hasNoSignedZeros() &&
        match(Op0, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
        match(Y, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && Op1 == X)
      return BinaryOperator::CreateFDivFMF(X, Y, &I);
    if (I.hasNoSignedZeros() &&
        match(Op1, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
        match(Y, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && Op0 == X)
      return BinaryOperator::CreateFDivFMF(X, Y, &I);

    // Like the similar transform in instsimplify, this requires 'nsz' because
    // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
    if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
        Op0->hasNUses(2)) {
      // Peek through fdiv to find squaring of square root:
      // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
      if (match(Op0, m_FDiv(m_Value(X),
                            m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(XX, Y, &I);
      }
      // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
      if (match(Op0, m_FDiv(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y)),
                            m_Value(X)))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(Y, XX, &I);
      }
    }

    // exp(X) * exp(Y) -> exp(X + Y)
    // Match as long as at least one of exp has only one use.
    if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y))) &&
        (Op0->hasOneUse() || Op1->hasOneUse())) {
      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
      Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
      return replaceInstUsesWith(I, Exp);
    }

    // exp2(X) * exp2(Y) -> exp2(X + Y)
    // Match as long as at least one of exp2 has only one use.
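    // (Requiring a one-use exp2 keeps the transform from increasing the total
    // number of calls when both original values are still needed elsewhere.)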
    if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y))) &&
        (Op0->hasOneUse() || Op1->hasOneUse())) {
      Value *XY = Builder.CreateFAddFMF(X, Y, &I);
      Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
      return replaceInstUsesWith(I, Exp2);
    }

    // (X*Y) * X => (X*X) * Y where Y != X
    // The purpose is two-fold:
    //  1) to form a power expression (of X).
    //  2) potentially shorten the critical path: After transformation, the
    //     latency of the instruction Y is amortized by the expression of X*X,
    //     and therefore Y is in a "less critical" position compared to what it
    //     was before the transformation.
    if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
        Op1 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
    if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
        Op0 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
  }

  // log2(X * 0.5) * Y = log2(X) * Y - Y
  if (I.isFast()) {
    IntrinsicInst *Log2 = nullptr;
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op0);
      Y = Op1;
    }
    if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op1);
      Y = Op0;
    }
    if (Log2) {
      Value *Log2 = Builder.CreateUnaryIntrinsic(Intrinsic::log2, X, &I);
      Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
      return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
    }
  }

  return nullptr;
}

/// Fold a divide or remainder with a select instruction divisor when one of
/// the select operands is zero. In that case, we can use the other select
/// operand because div/rem by zero is undefined.
bool InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
  SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
  if (!SI)
    return false;

  int NonNullOperand;
  if (match(SI->getTrueValue(), m_Zero()))
    // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
    NonNullOperand = 2;
  else if (match(SI->getFalseValue(), m_Zero()))
    // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
    NonNullOperand = 1;
  else
    return false;

  // Change the div/rem to use 'Y' instead of the select.
  replaceOperand(I, 1, SI->getOperand(NonNullOperand));

  // Okay, we know we can replace the operand of the div/rem with 'Y' with no
  // problem. However, the select, or the condition of the select may have
  // multiple uses. Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition only have a single use, don't bother with
  // this; exit early.
  Value *SelectCond = SI->getCondition();
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
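  // The walk stops at the block entry or at any instruction that is not
  // guaranteed to transfer execution to its successor, since the non-zero
  // fact only holds on paths that actually reach the div/rem.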
  BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
  Type *CondTy = SelectCond->getType();
  while (BBI != BBFront) {
    --BBI;
    // If we found an instruction that we can't assume will return, then
    // information from below it cannot be propagated above it.
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
         I != E; ++I) {
      if (*I == SI) {
        replaceUse(*I, SI->getOperand(NonNullOperand));
        Worklist.push(&*BBI);
      } else if (*I == SelectCond) {
        replaceUse(*I, NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
                                           : ConstantInt::getFalse(CondTy));
        Worklist.push(&*BBI);
      }
    }

    // If we passed the instruction, quit looking for it.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;
  }
  return true;
}

/// True if the multiply cannot be expressed in an int this size.
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
                              bool IsSigned) {
  bool Overflow;
  Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
  return Overflow;
}

/// True if C1 is a multiple of C2. Quotient contains C1/C2.
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
                       bool IsSigned) {
  assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");

  // Bail if we will divide by zero.
  if (C2.isNullValue())
    return false;

  // Bail if we would divide INT_MIN by -1.
  if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
    return false;

  APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
  if (IsSigned)
    APInt::sdivrem(C1, C2, Quotient, Remainder);
  else
    APInt::udivrem(C1, C2, Quotient, Remainder);

  return Remainder.isMinValue();
}

/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// Common integer divide transforms
Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
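  // (If either select arm is a literal zero, that arm cannot be the divisor on
  // any well-defined path, so the select is replaced by its other operand.)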
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2 -> X / (C1*C2)
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         *C1 != C1->getBitWidth() - 1) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if (!C2->isNullValue()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // If Op1 is 0 then it's undefined behaviour; if Op1 is 1 then the
      // result is one; if Op1 is -1 then the result is minus one; otherwise
      // it's zero.
      Value *Inc = Builder.CreateAdd(Op1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

  // See if we can fold away this div instruction.
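  // (If the demanded-bits analysis was able to simplify an operand, report the
  // instruction as changed so the worklist revisits it.)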
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      replaceOperand(I, 0, ConstantInt::get(Ty, 1));
      replaceOperand(I, 1, Y);
      return &I;
    }
  }

  return nullptr;
}

static const unsigned MaxDepth = 6;

namespace {

using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
                                           const BinaryOperator &I,
                                           InstCombinerImpl &IC);

/// Used to maintain state for visitUDivOperand().
struct UDivFoldAction {
  /// Informs visitUDiv() how to fold this operand. This can be zero if this
  /// action joins two actions together.
  FoldUDivOperandCb FoldAction;

  /// Which operand to fold.
  Value *OperandToFold;

  union {
    /// The instruction returned when FoldAction is invoked.
    Instruction *FoldResult;

    /// Stores the LHS action index if this action joins two actions together.
    size_t SelectLHSIdx;
  };

  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
      : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
      : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};

} // end anonymous namespace

// X udiv 2^C -> X >> C
static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
                                    const BinaryOperator &I,
                                    InstCombinerImpl &IC) {
  Constant *C1 = ConstantExpr::getExactLogBase2(cast<Constant>(Op1));
  if (!C1)
    llvm_unreachable("Failed to constant fold udiv -> logbase2");
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
// X udiv (zext (C1 << N)), where C1 is "1<<C2" --> X >> (N+C2)
static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
                                InstCombinerImpl &IC) {
  Value *ShiftLeft;
  if (!match(Op1, m_ZExt(m_Value(ShiftLeft))))
    ShiftLeft = Op1;

  Constant *CI;
  Value *N;
  if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N))))
    llvm_unreachable("match should never fail here!");
  Constant *Log2Base = ConstantExpr::getExactLogBase2(CI);
  if (!Log2Base)
    llvm_unreachable("getLogBase2 should never fail here!");
  N = IC.Builder.CreateAdd(N, Log2Base);
  if (Op1 != ShiftLeft)
    N = IC.Builder.CreateZExt(N, Op1->getType());
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// Recursively visits the possible right hand operands of a udiv
// instruction, seeing through select instructions, to determine if we can
// replace the udiv with something simpler. If we find that an operand is not
// able to simplify the udiv, we abort the entire transformation.
static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
                               SmallVectorImpl<UDivFoldAction> &Actions,
                               unsigned Depth = 0) {
  // FIXME: assert that Op1 isn't/doesn't contain undef.

  // Check to see if this is an unsigned division with an exact power of 2,
  // if so, convert to a right shift.
  if (match(Op1, m_Power2())) {
    Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1));
    return Actions.size();
  }

  // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
  if (match(Op1, m_Shl(m_Power2(), m_Value())) ||
      match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) {
    Actions.push_back(UDivFoldAction(foldUDivShl, Op1));
    return Actions.size();
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return 0;

  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
    // FIXME: missed optimization: if one of the hands of select is/contains
    // undef, just directly pick the other one.
    // FIXME: can both hands contain undef?
    if (size_t LHSIdx =
            visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth))
      if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) {
        Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1));
        return Actions.size();
      }

  return 0;
}

/// If we have zero-extended operands of an unsigned div or rem, we may be able
/// to narrow the operation (sink the zext below the math).
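/// For example: udiv (zext i8 X to i32), (zext i8 Y to i32)
///          --> zext (udiv i8 X, Y) to i32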
static Instruction *narrowUDivURem(BinaryOperator &I,
                                   InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  Value *N = I.getOperand(0);
  Value *D = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
      X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
    // udiv (zext X), (zext Y) --> zext (udiv X, Y)
    // urem (zext X), (zext Y) --> zext (urem X, Y)
    Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
    return new ZExtInst(NarrowOp, Ty);
  }

  Constant *C;
  if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
      (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
    if (ConstantExpr::getZExt(TruncC, Ty) != C)
      return nullptr;

    // udiv (zext X), C --> zext (udiv X, C')
    // urem (zext X), C --> zext (urem X, C')
    // udiv C, (zext X) --> zext (udiv C', X)
    // urem C, (zext X) --> zext (urem C', X)
    Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
                                       : Builder.CreateBinOp(Opcode, TruncC, X);
    return new ZExtInst(NarrowOp, Ty);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
  if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  const APInt *C1, *C2;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
    // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
    bool Overflow;
    APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
    if (!Overflow) {
      bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
      BinaryOperator *BO = BinaryOperator::CreateUDiv(
          X, ConstantInt::get(X->getType(), C2ShlC1));
      if (IsExact)
        BO->setIsExact();
      return BO;
    }
  }

  // Op0 / C where C is large (negative) --> zext (Op0 >= C)
  // TODO: Could use isKnownNegative() to handle non-constant values.
  Type *Ty = I.getType();
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }
  // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
    return NarrowDiv;

  // If the udiv operands are non-overflowing multiplies with a common operand,
  // then eliminate the common factor:
  // (A * B) / (A * X) --> B / X (and commuted variants)
  // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
  // TODO: If -reassociation handled this generally, we could remove this.
  Value *A, *B;
  if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
    if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
      return BinaryOperator::CreateUDiv(B, X);
    if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
      return BinaryOperator::CreateUDiv(A, X);
  }

  // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
  SmallVector<UDivFoldAction, 6> UDivActions;
  if (visitUDivOperand(Op0, Op1, I, UDivActions))
    for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) {
      FoldUDivOperandCb Action = UDivActions[i].FoldAction;
      Value *ActionOp1 = UDivActions[i].OperandToFold;
      Instruction *Inst;
      if (Action)
        Inst = Action(Op0, ActionOp1, I, *this);
      else {
        // This action joins two actions together. The RHS of this action is
        // simply the last action we processed; we saved the LHS action index in
        // the joining action.
        size_t SelectRHSIdx = i - 1;
        Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult;
        size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx;
        Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult;
        Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(),
                                  SelectLHS, SelectRHS);
      }

      // If this is the last action to process, return it to the InstCombiner.
      // Otherwise, we insert it before the UDiv and record it so that we may
      // use it as part of a joining action (i.e., a SelectInst).
      if (e - i != 1) {
        Inst->insertBefore(&I);
        UDivActions[i].FoldResult = Inst;
      } else
        return Inst;
    }

  return nullptr;
}

Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
  if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X;
  // sdiv Op0, -1 --> -Op0
  // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
  if (match(Op1, m_AllOnes()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return BinaryOperator::CreateNeg(Op0);

  // X / INT_MIN --> X == INT_MIN
  if (match(Op1, m_SignMask()))
    return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), Ty);

  // sdiv exact X, 1<<C --> ashr exact X, C iff 1<<C is non-negative
  // sdiv exact X, -1<<C --> -(ashr exact X, C)
  if (I.isExact() && ((match(Op1, m_Power2()) && match(Op1, m_NonNegative())) ||
                      match(Op1, m_NegatedPower2()))) {
    bool DivisorWasNegative = match(Op1, m_NegatedPower2());
    if (DivisorWasNegative)
      Op1 = ConstantExpr::getNeg(cast<Constant>(Op1));
    auto *AShr = BinaryOperator::CreateExactAShr(
        Op0, ConstantExpr::getExactLogBase2(cast<Constant>(Op1)), I.getName());
    if (!DivisorWasNegative)
      return AShr;
    Builder.Insert(AShr);
    AShr->setName(I.getName() + ".neg");
    return BinaryOperator::CreateNeg(AShr, I.getName());
  }

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // If the dividend is sign-extended and the constant divisor is small enough
    // to fit in the source type, shrink the division to the narrower type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.

      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Ty);
    }

    // -X / C --> X / -C (if the negation doesn't overflow).
    // TODO: This could be enhanced to handle arbitrary vector constants by
    // checking if all elements are not the min-signed-val.
    if (!Op1C->isMinSignedValue() &&
        match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
      Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
      Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // -X / Y --> -(X / Y)
  Value *Y;
  if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(
        Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));

  // abs(X) / X --> X > -1 ? 1 : -1
  // X / abs(X) --> X > -1 ? 1 : -1
  if (match(&I, m_c_BinOp(
                    m_OneUse(m_Intrinsic<Intrinsic::abs>(m_Value(X), m_One())),
                    m_Deferred(X)))) {
    Constant *NegOne = ConstantInt::getAllOnesValue(Ty);
    Value *Cond = Builder.CreateICmpSGT(X, NegOne);
    return SelectInst::Create(Cond, ConstantInt::get(Ty, 1), NegOne);
  }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
  APInt Mask(APInt::getSignMask(Ty->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
    if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
      // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (match(Op1, m_NegatedPower2())) {
      // X sdiv (-(1 << C)) -> -(X sdiv (1 << C)) ->
      //                    -> -(X udiv (1 << C)) -> -(X u>> C)
      return BinaryOperator::CreateNeg(Builder.Insert(foldUDivPow2Cst(
          Op0, ConstantExpr::getNeg(cast<Constant>(Op1)), I, *this)));
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}

/// Remove negation and try to convert division into multiplication.
static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(1), m_Constant(C)))
    return nullptr;

  // -X / C --> X / -C
  Value *X;
  if (match(I.getOperand(0), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);

  // If the constant divisor has an exact inverse, this is always safe. If not,
  // then we can still create a reciprocal if fast-math-flags allow it and the
  // constant is a regular number (not zero, infinite, or denormal).
  if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
    return nullptr;

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
  if (!RecipC->isNormalFP())
    return nullptr;

  // X / C --> X * (1 / C)
  return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
}

/// Remove negation and try to reassociate constant math.
static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(0), m_Constant(C)))
    return nullptr;

  // C / -X --> -C / X
  Value *X;
  if (match(I.getOperand(1), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);

  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;

  // Try to reassociate C / X expressions where X includes another constant.
  Constant *C2, *NewC = nullptr;
  if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
    // C / (X * C2) --> (C / C2) / X
    NewC = ConstantExpr::getFDiv(C, C2);
  } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
    // C / (X / C2) --> (C * C2) / X
    NewC = ConstantExpr::getFMul(C, C2);
  }
  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  if (!NewC || !NewC->isNormalFP())
    return nullptr;

  return BinaryOperator::CreateFDivFMF(NewC, X, &I);
}

Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
  if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
    // Z / (1.0 / Y) => (Y * Z)
    //
    // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
    // m_OneUse check is avoided because even in the case of the multiple uses
    // for 1.0/Y, the number of instructions remains the same and a division is
    // replaced by a multiplication.
    if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
      return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
                  match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) &&
        hasFloatFn(&TLI, I.getType(), LibFunc_tan, LibFunc_tanf, LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs =
          cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
  Value *X, *Y;
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
    replaceOperand(I, 1, Y);
    return &I;
  }

  // X / fabs(X) -> copysign(1.0, X)
  // fabs(X) / X -> copysign(1.0, X)
  if (I.hasNoNaNs() && I.hasNoInfs() &&
      (match(&I, m_FDiv(m_Value(X), m_FAbs(m_Deferred(X)))) ||
       match(&I, m_FDiv(m_FAbs(m_Value(X)), m_Deferred(X))))) {
    Value *V = Builder.CreateBinaryIntrinsic(
        Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
    return replaceInstUsesWith(I, V);
  }
  return nullptr;
}

/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// Common integer remainder transforms
Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
  if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    // This may increase instruction count; we don't enforce that Y is a
    // constant.
    Constant *N1 = Constant::getAllOnesValue(Ty);
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }

  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One())) {
    Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  // X urem C -> X < C ? X : X - C, where C >= signbit.
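  // (With the high bit of C set, X can contain at most one multiple of C, so
  // the remainder is either X itself or X - C.)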
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
    Value *Sub = Builder.CreateSub(Op0, Op1);
    return SelectInst::Create(Cmp, Op0, Sub);
  }

  // If the divisor is a sext of a boolean, then the divisor must be max
  // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
  // max unsigned value. In that case, the remainder is 0:
  // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
  if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  {
    const APInt *Y;
    // X % -Y -> X % Y
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
      return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
  }

  // -X srem Y --> -(X srem Y)
  Value *X, *Y;
  if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = cast<FixedVectorType>(C->getType())->getNumElements();

    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }

      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C) // Don't loop on -MININT
        return replaceOperand(I, 1, NewRHSV);
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
  if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  return nullptr;
}