//===- InstCombineMulDivRem.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
// srem, urem, frem.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to determine
  // if this is safe. For example, the use could be in dynamically unreached
  // code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact. Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is a
    // non-zero context as well.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      I->setOperand(0, V2);
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //    If V is a phi node, we can call this on each of its operands.
  //    "select cond, X, 0" can simplify to "X".

  return MadeChange ? V : nullptr;
}

/// \brief A helper routine of InstCombiner::visitMul().
///
/// If C is a scalar/vector of known powers of 2, then this function returns
/// a new scalar/vector obtained from logBase2 of C.
/// Return a null pointer otherwise.
static Constant *getLogBase2(Type *Ty, Constant *C) {
  const APInt *IVal;
  if (match(C, m_APInt(IVal)) && IVal->isPowerOf2())
    return ConstantInt::get(Ty, IVal->logBase2());

  if (!Ty->isVectorTy())
    return nullptr;

  SmallVector<Constant *, 4> Elts;
  for (unsigned I = 0, E = Ty->getVectorNumElements(); I != E; ++I) {
    Constant *Elt = C->getAggregateElement(I);
    if (!Elt)
      return nullptr;
    if (isa<UndefValue>(Elt)) {
      Elts.push_back(UndefValue::get(Ty->getScalarType()));
      continue;
    }
    if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
      return nullptr;
    Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2()));
  }

  return ConstantVector::get(Elts);
}

/// \brief Return true if we can prove that:
///    (mul LHS, RHS) === (mul nsw LHS, RHS)
bool InstCombiner::willNotOverflowSignedMul(const Value *LHS,
                                            const Value *RHS,
                                            const Instruction &CxtI) const {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading sign bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();

  // Note that underestimating the number of sign bits gives a more
  // conservative answer.
  unsigned SignBits =
      ComputeNumSignBits(LHS, 0, &CxtI) + ComputeNumSignBits(RHS, 0, &CxtI);

  // First handle the easy case: if we have enough sign bits there's
  // definitely no overflow.
  if (SignBits > BitWidth + 1)
    return true;

  // There are two ambiguous cases where there can be no overflow:
  //   SignBits == BitWidth + 1    and
  //   SignBits == BitWidth
  // The second case is difficult to check, therefore we only handle the
  // first case.
  if (SignBits == BitWidth + 1) {
    // It overflows only when both arguments are negative and the true
    // product is exactly the minimum negative number.
    // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
    // For simplicity we just check if at least one side is not negative.
    KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, &CxtI);
    KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, &CxtI);
    if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
      return true;
  }
  return false;
}

Instruction *InstCombiner::visitMul(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyMulInst(Op0, Op1, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // X * -1 == 0 - X
  if (match(Op1, m_AllOnes())) {
    BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
    if (I.hasNoSignedWrap())
      BO->setHasNoSignedWrap();
    return BO;
  }

  // Also allow combining multiply instructions on vectors.
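  // For example, with C2 = 2 and C1 = 3 in the fold below, (X << 2) * 3
  // becomes X * 12, because the constant (3 << 2) folds to 12.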
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
          Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
      if (Constant *NewCst = getLogBase2(NewOp->getType(), C1)) {
        unsigned Width = NewCst->getType()->getPrimitiveSizeInBits();
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (I.hasNoUnsignedWrap())
          Shl->setHasNoUnsignedWrap();
        if (I.hasNoSignedWrap()) {
          const APInt *V;
          if (match(NewCst, m_APInt(V)) && *V != Width - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    // (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
    // (Y + const) * (-(2**n)) -> (-constY) * (2**n), for positive nonzero n
    // The "* (2**n)" thus becomes a potential shifting opportunity.
    {
      const APInt &Val = CI->getValue();
      const APInt &PosVal = Val.abs();
      if (Val.isNegative() && PosVal.isPowerOf2()) {
        Value *X = nullptr, *Y = nullptr;
        if (Op0->hasOneUse()) {
          ConstantInt *C1;
          Value *Sub = nullptr;
          if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
            Sub = Builder.CreateSub(X, Y, "suba");
          else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
            Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
          if (Sub)
            return
              BinaryOperator::CreateMul(Sub,
                                        ConstantInt::get(Y->getType(), PosVal));
        }
      }
    }
  }

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  // Simplify mul instructions with a constant RHS.
  if (isa<Constant>(Op1)) {
    // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
    Value *X;
    Constant *C1;
    if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
      Value *Mul = Builder.CreateMul(C1, Op1);
      // Only go forward with the transform if C1*CI simplifies to a tidier
      // constant.
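      // If the builder folded C1*Op1 to a plain constant, it no longer
      // matches a 'mul' pattern; that is what the check below tests for.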
      if (!match(Mul, m_Mul(m_Value(), m_Value())))
        return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
    }
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    if (I.hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // (X / Y) *  Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(X, Rem);
      return BinaryOperator::CreateSub(Rem, X);
    }
  }

  /// i1 mul -> i1 and.
  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(Op0, Op1);

  // X*(1 << Y) --> X << Y
  // (1 << Y)*X --> X << Y
  {
    Value *Y;
    BinaryOperator *BO = nullptr;
    bool ShlNSW = false;
    if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op1, Y);
      ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
    } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op0, Y);
      ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
    }
    if (BO) {
      if (I.hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && ShlNSW)
        BO->setHasNoSignedWrap();
      return BO;
    }
  }

  // (bool X) * Y --> X ? Y : 0
  // Y * (bool X) --> X ? Y : 0
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));

  // (lshr X, 31) * Y --> (ashr X, 31) & Y
  // Y * (lshr X, 31) --> (ashr X, 31) & Y
  // TODO: We are not checking one-use because the elimination of the multiply
  //       is better for analysis?
  // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
  //       more similar to what we're doing above.
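  // The lshr keeps only the sign bit, so it yields 0 or 1 and the multiply
  // yields 0 or Y. The ashr yields 0 or -1 (all ones), so the 'and' below
  // selects the same 0 or Y.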
  const APInt *C;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
  if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);

  // Check for (mul (sext x), y), see if we can merge this into an
  // integer mul followed by a sext.
  if (SExtInst *Op0Conv = dyn_cast<SExtInst>(Op0)) {
    // (mul (sext x), cst) --> (sext (mul x, cst'))
    if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
      if (Op0Conv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(Op1C, Op0Conv->getOperand(0)->getType());
        if (ConstantExpr::getSExt(CI, I.getType()) == Op1C &&
            willNotOverflowSignedMul(Op0Conv->getOperand(0), CI, I)) {
          // Insert the new, smaller mul.
          Value *NewMul =
              Builder.CreateNSWMul(Op0Conv->getOperand(0), CI, "mulconv");
          return new SExtInst(NewMul, I.getType());
        }
      }
    }

    // (mul (sext x), (sext y)) --> (sext (mul int x, y))
    if (SExtInst *Op1Conv = dyn_cast<SExtInst>(Op1)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of sexts), and if the
      // integer mul will not overflow.
      if (Op0Conv->getOperand(0)->getType() ==
              Op1Conv->getOperand(0)->getType() &&
          (Op0Conv->hasOneUse() || Op1Conv->hasOneUse()) &&
          willNotOverflowSignedMul(Op0Conv->getOperand(0),
                                   Op1Conv->getOperand(0), I)) {
        // Insert the new integer mul.
        Value *NewMul = Builder.CreateNSWMul(
            Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
        return new SExtInst(NewMul, I.getType());
      }
    }
  }

  // Check for (mul (zext x), y), see if we can merge this into an
  // integer mul followed by a zext.
  if (auto *Op0Conv = dyn_cast<ZExtInst>(Op0)) {
    // (mul (zext x), cst) --> (zext (mul x, cst'))
    if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
      if (Op0Conv->hasOneUse()) {
        Constant *CI =
            ConstantExpr::getTrunc(Op1C, Op0Conv->getOperand(0)->getType());
        if (ConstantExpr::getZExt(CI, I.getType()) == Op1C &&
            willNotOverflowUnsignedMul(Op0Conv->getOperand(0), CI, I)) {
          // Insert the new, smaller mul.
          Value *NewMul =
              Builder.CreateNUWMul(Op0Conv->getOperand(0), CI, "mulconv");
          return new ZExtInst(NewMul, I.getType());
        }
      }
    }

    // (mul (zext x), (zext y)) --> (zext (mul int x, y))
    if (auto *Op1Conv = dyn_cast<ZExtInst>(Op1)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of zexts), and if the
      // integer mul will not overflow.
      if (Op0Conv->getOperand(0)->getType() ==
              Op1Conv->getOperand(0)->getType() &&
          (Op0Conv->hasOneUse() || Op1Conv->hasOneUse()) &&
          willNotOverflowUnsignedMul(Op0Conv->getOperand(0),
                                     Op1Conv->getOperand(0), I)) {
        // Insert the new integer mul.
        Value *NewMul = Builder.CreateNUWMul(
            Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
        return new ZExtInst(NewMul, I.getType());
      }
    }
  }

  if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  // X * -1.0 --> -X
  if (match(Op1, m_SpecificFP(-1.0)))
    return BinaryOperator::CreateFNegFMF(Op0, &I);

  // -X * -Y --> X * Y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateFMulFMF(X, Y, &I);

  // -X * C --> X * -C
  Constant *C;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
    return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);

  // Sink negation: -X * Y --> -(X * Y)
  if (match(Op0, m_OneUse(m_FNeg(m_Value(X)))))
    return BinaryOperator::CreateFNegFMF(Builder.CreateFMulFMF(X, Op1, &I), &I);

  // Sink negation: Y * -X --> -(X * Y)
  if (match(Op1, m_OneUse(m_FNeg(m_Value(X)))))
    return BinaryOperator::CreateFNegFMF(Builder.CreateFMulFMF(X, Op0, &I), &I);

  // fabs(X) * fabs(X) -> X * X
  if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))))
    return BinaryOperator::CreateFMulFMF(X, X, &I);

  // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc()) {
    // Reassociate constant RHS with another constant to form constant
    // expression.
    if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
      Constant *C1;
      if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
        // (C1 / X) * C --> (C * C1) / X
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        if (CC1->isNormalFP())
          return BinaryOperator::CreateFDivFMF(CC1, X, &I);
      }
      if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
        // (X / C1) * C --> X * (C / C1)
        Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
        if (CDivC1->isNormalFP())
          return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);

        // If the constant was a denormal, try reassociating differently.
        // (X / C1) * C --> X / (C1 / C)
        Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
        if (Op0->hasOneUse() && C1DivC->isNormalFP())
          return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
      }

      // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
      // canonicalized to 'fadd X, C'. Distributing the multiply may allow
      // further folds and (X * C) + C2 is 'fma'.
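      // For example, (X + 1.0) * 2.0 distributes to (X * 2.0) + 2.0.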
      if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
        // (X + C1) * C --> (X * C) + (C * C1)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
      }
      if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
        // (C1 - X) * C --> (C * C1) - (X * C)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
      }
    }

    // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
    // nnan disallows the possibility of returning a number if both operands are
    // negative (in that case, we should return NaN).
    if (I.hasNoNaNs() &&
        match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
        match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
      Value *XY = Builder.CreateFMulFMF(X, Y, &I);
      Value *Sqrt = Builder.CreateIntrinsic(Intrinsic::sqrt, { XY }, &I);
      return replaceInstUsesWith(I, Sqrt);
    }

    // (X*Y) * X => (X*X) * Y where Y != X
    //  The purpose is two-fold:
    //   1) to form a power expression (of X).
    //   2) potentially shorten the critical path: After transformation, the
    //      latency of the instruction Y is amortized by the expression of X*X,
    //      and therefore Y is in a "less critical" position compared to what it
    //      was before the transformation.
    if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
        Op1 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
    if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
        Op0 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
  }

  // log2(X * 0.5) * Y = log2(X) * Y - Y
  if (I.isFast()) {
    IntrinsicInst *Log2 = nullptr;
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op0);
      Y = Op1;
    }
    if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op1);
      Y = Op0;
    }
    if (Log2) {
      Log2->setArgOperand(0, X);
      Log2->copyFastMathFlags(&I);
      Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
      return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
    }
  }

  return Changed ? &I : nullptr;
}

/// Fold a divide or remainder with a select instruction divisor when one of the
/// select operands is zero. In that case, we can use the other select operand
/// because div/rem by zero is undefined.
bool InstCombiner::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
  SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
  if (!SI)
    return false;

  int NonNullOperand;
  if (match(SI->getTrueValue(), m_Zero()))
    // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
    NonNullOperand = 2;
  else if (match(SI->getFalseValue(), m_Zero()))
    // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
    NonNullOperand = 1;
  else
    return false;

  // Change the div/rem to use 'Y' instead of the select.
  I.setOperand(1, SI->getOperand(NonNullOperand));

  // Okay, we know we can replace the operand of the div/rem with 'Y' with no
  // problem. However, the select, or the condition of the select may have
  // multiple uses. Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition only have a single use, don't bother with this;
  // early exit.
  Value *SelectCond = SI->getCondition();
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
  Type *CondTy = SelectCond->getType();
  while (BBI != BBFront) {
    --BBI;
    // If we found a call to a function, we can't assume it will return, so
    // information from below it cannot be propagated above it.
    if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
         I != E; ++I) {
      if (*I == SI) {
        *I = SI->getOperand(NonNullOperand);
        Worklist.Add(&*BBI);
      } else if (*I == SelectCond) {
        *I = NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
                                 : ConstantInt::getFalse(CondTy);
        Worklist.Add(&*BBI);
      }
    }

    // If we walked past the instruction, stop looking for it.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;

  }
  return true;
}

/// True if the multiply cannot be expressed in an int this size.
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
                              bool IsSigned) {
  bool Overflow;
  Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
  return Overflow;
}

/// True if C2 is a multiple of C1. Quotient contains C2/C1.
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
                       bool IsSigned) {
  assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");

  // Bail if we will divide by zero.
  if (C2.isNullValue())
    return false;

  // Bail if we would divide INT_MIN by -1.
  if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
    return false;

  APInt Remainder(C1.getBitWidth(), /*Val=*/0ULL, IsSigned);
  if (IsSigned)
    APInt::sdivrem(C1, C2, Quotient, Remainder);
  else
    APInt::udivrem(C1, C2, Quotient, Remainder);

  return Remainder.isMinValue();
}

/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// @brief Common integer divide transforms
Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
    I.setOperand(1, V);
    return &I;
  }

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
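  // For example, 'udiv X, (select Cond, Y, 0)' becomes 'udiv X, Y' because a
  // divisor of zero would be undefined behavior anyway.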
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2  -> X / (C1*C2)
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         *C1 != C1->getBitWidth() - 1) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of (1 << C1).
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if (1 << C1) is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if (!C2->isNullValue()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
      // result is one, if Op1 is -1 then the result is minus one, otherwise
      // it's zero.
      Value *Inc = Builder.CreateAdd(Op1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

  // See if we can fold away this div instruction.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      I.setOperand(0, ConstantInt::get(Ty, 1));
      I.setOperand(1, Y);
      return &I;
    }
  }

  return nullptr;
}

static const unsigned MaxDepth = 6;

namespace {

using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
                                           const BinaryOperator &I,
                                           InstCombiner &IC);

/// \brief Used to maintain state for visitUDivOperand().
struct UDivFoldAction {
  /// Informs visitUDiv() how to fold this operand. This can be zero if this
  /// action joins two actions together.
  FoldUDivOperandCb FoldAction;

  /// Which operand to fold.
  Value *OperandToFold;

  union {
    /// The instruction returned when FoldAction is invoked.
    Instruction *FoldResult;

    /// Stores the LHS action index if this action joins two actions together.
    size_t SelectLHSIdx;
  };

  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
      : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
      : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};

} // end anonymous namespace

// X udiv 2^C -> X >> C
static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
                                    const BinaryOperator &I, InstCombiner &IC) {
  Constant *C1 = getLogBase2(Op0->getType(), cast<Constant>(Op1));
  if (!C1)
    llvm_unreachable("Failed to constant fold udiv -> logbase2");
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// X udiv C, where C >= signbit
static Instruction *foldUDivNegCst(Value *Op0, Value *Op1,
                                   const BinaryOperator &I, InstCombiner &IC) {
  Value *ICI = IC.Builder.CreateICmpULT(Op0, cast<Constant>(Op1));
  return SelectInst::Create(ICI, Constant::getNullValue(I.getType()),
                            ConstantInt::get(I.getType(), 1));
}

// X udiv (C1 << N), where C1 is "1<<C2"  -->  X >> (N+C2)
// X udiv (zext (C1 << N)), where C1 is "1<<C2"  -->  X >> (N+C2)
static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
                                InstCombiner &IC) {
  Value *ShiftLeft;
  if (!match(Op1, m_ZExt(m_Value(ShiftLeft))))
    ShiftLeft = Op1;

  Constant *CI;
  Value *N;
  if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N))))
    llvm_unreachable("match should never fail here!");
  Constant *Log2Base = getLogBase2(N->getType(), CI);
  if (!Log2Base)
    llvm_unreachable("getLogBase2 should never fail here!");
  N = IC.Builder.CreateAdd(N, Log2Base);
  if (Op1 != ShiftLeft)
    N = IC.Builder.CreateZExt(N, Op1->getType());
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// \brief Recursively visits the possible right hand operands of a udiv
// instruction, seeing through select instructions, to determine if we can
// replace the udiv with something simpler. If we find that an operand is not
// able to simplify the udiv, we abort the entire transformation.
static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
                               SmallVectorImpl<UDivFoldAction> &Actions,
                               unsigned Depth = 0) {
  // Check to see if this is an unsigned division with an exact power of 2,
  // if so, convert to a right shift.
  if (match(Op1, m_Power2())) {
    Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1));
    return Actions.size();
  }

  // X udiv C, where C >= signbit
  if (match(Op1, m_Negative())) {
    Actions.push_back(UDivFoldAction(foldUDivNegCst, Op1));
    return Actions.size();
  }

  // X udiv (C1 << N), where C1 is "1<<C2"  -->  X >> (N+C2)
  if (match(Op1, m_Shl(m_Power2(), m_Value())) ||
      match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) {
    Actions.push_back(UDivFoldAction(foldUDivShl, Op1));
    return Actions.size();
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return 0;

  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
    if (size_t LHSIdx =
            visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth))
      if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) {
        Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1));
        return Actions.size();
      }

  return 0;
}

/// If we have zero-extended operands of an unsigned div or rem, we may be able
/// to narrow the operation (sink the zext below the math).
static Instruction *narrowUDivURem(BinaryOperator &I,
                                   InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  Value *N = I.getOperand(0);
  Value *D = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
      X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
    // udiv (zext X), (zext Y) --> zext (udiv X, Y)
    // urem (zext X), (zext Y) --> zext (urem X, Y)
    Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
    return new ZExtInst(NarrowOp, Ty);
  }

  Constant *C;
  if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
      (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
    if (ConstantExpr::getZExt(TruncC, Ty) != C)
      return nullptr;

    // udiv (zext X), C --> zext (udiv X, C')
    // urem (zext X), C --> zext (urem X, C')
    // udiv C, (zext X) --> zext (udiv C', X)
    // urem C, (zext X) --> zext (urem C', X)
    Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
                                       : Builder.CreateBinOp(Opcode, TruncC, X);
    return new ZExtInst(NarrowOp, Ty);
  }

  return nullptr;
}

Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyUDivInst(Op0, Op1, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  // (x lshr C1) udiv C2 --> x udiv (C2 << C1)
  {
    Value *X;
    const APInt *C1, *C2;
    if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) &&
        match(Op1, m_APInt(C2))) {
      bool Overflow;
      APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
      if (!Overflow) {
        bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
        BinaryOperator *BO = BinaryOperator::CreateUDiv(
            X, ConstantInt::get(X->getType(), C2ShlC1));
        if (IsExact)
          BO->setIsExact();
        return BO;
      }
    }
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
    return NarrowDiv;

  // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
  SmallVector<UDivFoldAction, 6> UDivActions;
  if (visitUDivOperand(Op0, Op1, I, UDivActions))
    for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) {
      FoldUDivOperandCb Action = UDivActions[i].FoldAction;
      Value *ActionOp1 = UDivActions[i].OperandToFold;
      Instruction *Inst;
      if (Action)
        Inst = Action(Op0, ActionOp1, I, *this);
      else {
        // This action joins two actions together. The RHS of this action is
        // simply the last action we processed; we saved the LHS action index
        // in the joining action.
        size_t SelectRHSIdx = i - 1;
        Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult;
        size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx;
        Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult;
        Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(),
                                  SelectLHS, SelectRHS);
      }

      // If this is the last action to process, return it to the InstCombiner.
      // Otherwise, we insert it before the UDiv and record it so that we may
      // use it as part of a joining action (i.e., a SelectInst).
      if (e - i != 1) {
        Inst->insertBefore(&I);
        UDivActions[i].FoldResult = Inst;
      } else
        return Inst;
    }

  return nullptr;
}

Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifySDivInst(Op0, Op1, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // sdiv X, -1 == -X
    if (Op1C->isAllOnesValue())
      return BinaryOperator::CreateNeg(Op0);

    // sdiv exact X, C  -->  ashr exact X, log2(C)
    if (I.isExact() && Op1C->isNonNegative() && Op1C->isPowerOf2()) {
      Value *ShAmt = ConstantInt::get(Op1->getType(), Op1C->exactLogBase2());
      return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName());
    }

    // If the dividend is sign-extended and the constant divisor is small enough
    // to fit in the source type, shrink the division to the narrower type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.

      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Op0->getType());
    }
  }

  if (Constant *RHS = dyn_cast<Constant>(Op1)) {
    // X/INT_MIN -> X == INT_MIN
    if (RHS->isMinSignedValue())
      return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType());

    // -X/C  -->  X/-C  provided the negation doesn't overflow.
    Value *X;
    if (match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
      auto *BO = BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(RHS));
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
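  // For example, with i8 operands, 'sdiv (and X, 127), 12' can become
  // 'udiv (and X, 127), 12' since neither value can have its sign bit set.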
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
    if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
      // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}

/// Remove negation and try to convert division into multiplication.
static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(1), m_Constant(C)))
    return nullptr;

  // -X / C --> X / -C
  Value *X;
  if (match(I.getOperand(0), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);

  // If the constant divisor has an exact inverse, this is always safe. If not,
  // then we can still create a reciprocal if fast-math-flags allow it and the
  // constant is a regular number (not zero, infinite, or denormal).
  if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
    return nullptr;

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
  if (!RecipC->isNormalFP())
    return nullptr;

  // X / C --> X * (1 / C)
  return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
}

/// Remove negation and try to reassociate constant math.
static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(0), m_Constant(C)))
    return nullptr;

  // C / -X --> -C / X
  Value *X;
  if (match(I.getOperand(1), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);

  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;

  // Try to reassociate C / X expressions where X includes another constant.
  Constant *C2, *NewC = nullptr;
  if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
    // C / (X * C2) --> (C / C2) / X
    NewC = ConstantExpr::getFDiv(C, C2);
  } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
    // C / (X / C2) --> (C * C2) / X
    NewC = ConstantExpr::getFMul(C, C2);
  }
  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  if (!NewC || !NewC->isNormalFP())
    return nullptr;

  return BinaryOperator::CreateFDivFMF(NewC, X, &I);
}

Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyFDivInst(Op0, Op1, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
                  match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) && hasUnaryFloatFn(&TLI, I.getType(), LibFunc_tan,
                                            LibFunc_tanf, LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs = CallSite(Op0).getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, TLI.getName(LibFunc_tan), B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // -X / -Y -> X / Y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y)))) {
    I.setOperand(0, X);
    I.setOperand(1, Y);
    return &I;
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    I.setOperand(0, ConstantFP::get(I.getType(), 1.0));
    I.setOperand(1, Y);
    return &I;
  }

  return nullptr;
}

/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// @brief Common integer remainder transforms
Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
    I.setOperand(1, V);
    return &I;
  }

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitURem(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyURemInst(Op0, Op1, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2,
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    Constant *N1 = Constant::getAllOnesValue(I.getType());
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }

  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One())) {
    Value *Cmp = Builder.CreateICmpNE(Op1, Op0);
    Value *Ext = Builder.CreateZExt(Cmp, I.getType());
    return replaceInstUsesWith(I, Ext);
  }

  // X urem C -> X < C ? X : X - C, where C >= signbit.
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
    Value *Sub = Builder.CreateSub(Op0, Op1);
    return SelectInst::Create(Cmp, Op0, Sub);
  }

  return nullptr;
}

Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifySRemInst(Op0, Op1, SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  {
    const APInt *Y;
    // X % -Y -> X % Y
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue()) {
      Worklist.AddValue(I.getOperand(1));
      I.setOperand(1, ConstantInt::get(I.getType(), -*Y));
      return &I;
    }
  }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
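  // For example, with i8 operands, 'srem (and X, 127), 10' can become
  // 'urem (and X, 127), 10' since neither value can have its sign bit set.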
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = C->getType()->getVectorNumElements();

    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }

      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C) { // Don't loop on -MININT
        Worklist.AddValue(I.getOperand(1));
        I.setOperand(1, NewRHSV);
        return &I;
      }
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyFRemInst(Op0, Op1, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  return nullptr;
}