//===- InstCombineMulDivRem.cpp -------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
// srem, urem, frem.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to determine
  // if this is safe. For example, the use could be in dynamically unreached
  // code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact. Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is a
    // non-zero context as well.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      I->setOperand(0, V2);
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //   If V is a phi node, we can call this on each of its operands.
  //   "select cond, X, 0" can simplify to "X".

  return MadeChange ? V : nullptr;
}

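// Worked example for simplifyValueKnownNonZero (illustrative IR only): if
//   %v = lshr i32 (shl i32 1, %a), %b
// is known to be non-zero, the single set bit cannot have been shifted out,
// so %v can be rebuilt as
//   %amt = sub i32 %a, %b
//   %v   = shl i32 1, %amt
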
/// A helper routine of InstCombiner::visitMul().
///
/// If C is a scalar/vector of known powers of 2, then this function returns
/// a new scalar/vector obtained from logBase2 of C.
/// Return a null pointer otherwise.
static Constant *getLogBase2(Type *Ty, Constant *C) {
  const APInt *IVal;
  if (match(C, m_APInt(IVal)) && IVal->isPowerOf2())
    return ConstantInt::get(Ty, IVal->logBase2());

  if (!Ty->isVectorTy())
    return nullptr;

  SmallVector<Constant *, 4> Elts;
  for (unsigned I = 0, E = Ty->getVectorNumElements(); I != E; ++I) {
    Constant *Elt = C->getAggregateElement(I);
    if (!Elt)
      return nullptr;
    if (isa<UndefValue>(Elt)) {
      Elts.push_back(UndefValue::get(Ty->getScalarType()));
      continue;
    }
    if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
      return nullptr;
    Elts.push_back(ConstantInt::get(Ty->getScalarType(), IVal->logBase2()));
  }

  return ConstantVector::get(Elts);
}

Instruction *InstCombiner::visitMul(BinaryOperator &I) {
  if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // X * -1 == 0 - X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_AllOnes())) {
    BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
    if (I.hasNoSignedWrap())
      BO->setHasNoSignedWrap();
    return BO;
  }

  // Also allow combining multiply instructions on vectors.
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
          Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
      if (Constant *NewCst = getLogBase2(NewOp->getType(), C1)) {
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (I.hasNoUnsignedWrap())
          Shl->setHasNoUnsignedWrap();
        if (I.hasNoSignedWrap()) {
          const APInt *V;
          if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
    // (Y - X) * (-(2**n)) -> (X - Y) * (2**n), for positive nonzero n
    // (Y + const) * (-(2**n)) -> (-const - Y) * (2**n), for positive nonzero n
    // The "* (2**n)" thus becomes a potential shifting opportunity.
    {
      const APInt &Val = CI->getValue();
      const APInt &PosVal = Val.abs();
      if (Val.isNegative() && PosVal.isPowerOf2()) {
        Value *X = nullptr, *Y = nullptr;
        if (Op0->hasOneUse()) {
          ConstantInt *C1;
          Value *Sub = nullptr;
          if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
            Sub = Builder.CreateSub(X, Y, "suba");
          else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
            Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
          if (Sub)
            return
                BinaryOperator::CreateMul(Sub,
                                          ConstantInt::get(Y->getType(), PosVal));
        }
      }
    }
  }

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  // Simplify mul instructions with a constant RHS.
  if (isa<Constant>(Op1)) {
    // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
    Value *X;
    Constant *C1;
    if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
      Value *Mul = Builder.CreateMul(C1, Op1);
      // Only go forward with the transform if C1*CI simplifies to a tidier
      // constant.
      if (!match(Mul, m_Mul(m_Value(), m_Value())))
        return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
    }
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    if (I.hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // -X * Y --> -(X * Y)
  // X * -Y --> -(X * Y)
  if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));

  // (X / Y) * Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(X, Rem);
      return BinaryOperator::CreateSub(Rem, X);
    }
  }

  // i1 mul -> i1 and.
  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(Op0, Op1);

  // X*(1 << Y) --> X << Y
  // (1 << Y)*X --> X << Y
  {
    Value *Y;
    BinaryOperator *BO = nullptr;
    bool ShlNSW = false;
    if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op1, Y);
      ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
    } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op0, Y);
      ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
    }
    if (BO) {
      if (I.hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && ShlNSW)
        BO->setHasNoSignedWrap();
      return BO;
    }
  }

  // (bool X) * Y --> X ? Y : 0
  // Y * (bool X) --> X ? Y : 0
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));

  // (lshr X, 31) * Y --> (ashr X, 31) & Y
  // Y * (lshr X, 31) --> (ashr X, 31) & Y
  // TODO: We are not checking one-use because the elimination of the multiply
  //       is better for analysis?
  // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
  //       more similar to what we're doing above.
  const APInt *C;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
  if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}
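
// Examples of the integer mul folds above (illustrative IR only):
//   mul i32 %x, 8                    -->  shl i32 %x, 3
//   mul i32 (zext i1 %b to i32), %y  -->  select i1 %b, i32 %y, i32 0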

Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
  if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  // X * -1.0 --> -X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_SpecificFP(-1.0)))
    return BinaryOperator::CreateFNegFMF(Op0, &I);

  // -X * -Y --> X * Y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateFMulFMF(X, Y, &I);

  // -X * C --> X * -C
  Constant *C;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
    return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);

  // Sink negation: -X * Y --> -(X * Y)
  if (match(Op0, m_OneUse(m_FNeg(m_Value(X)))))
    return BinaryOperator::CreateFNegFMF(Builder.CreateFMulFMF(X, Op1, &I), &I);

  // Sink negation: Y * -X --> -(X * Y)
  if (match(Op1, m_OneUse(m_FNeg(m_Value(X)))))
    return BinaryOperator::CreateFNegFMF(Builder.CreateFMulFMF(X, Op0, &I), &I);

  // fabs(X) * fabs(X) -> X * X
  if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::fabs>(m_Value(X))))
    return BinaryOperator::CreateFMulFMF(X, X, &I);

  // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc()) {
    // Reassociate constant RHS with another constant to form constant
    // expression.
    if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
      Constant *C1;
      if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
        // (C1 / X) * C --> (C * C1) / X
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        if (CC1->isNormalFP())
          return BinaryOperator::CreateFDivFMF(CC1, X, &I);
      }
      if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
        // (X / C1) * C --> X * (C / C1)
        Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
        if (CDivC1->isNormalFP())
          return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);

        // If the constant was a denormal, try reassociating differently.
        // (X / C1) * C --> X / (C1 / C)
        Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
        if (Op0->hasOneUse() && C1DivC->isNormalFP())
          return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
      }

      // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
      // canonicalized to 'fadd X, C'. Distributing the multiply may allow
      // further folds and (X * C) + C2 is 'fma'.
      if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
        // (X + C1) * C --> (X * C) + (C * C1)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
      }
      if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
        // (C1 - X) * C --> (C * C1) - (X * C)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
      }
    }

    // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
    // nnan disallows the possibility of returning a number if both operands are
    // negative (in that case, we should return NaN).
    if (I.hasNoNaNs() &&
        match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
        match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
      Value *XY = Builder.CreateFMulFMF(X, Y, &I);
      Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
      return replaceInstUsesWith(I, Sqrt);
    }

    // (X*Y) * X => (X*X) * Y where Y != X
    // The purpose is two-fold:
    //   1) to form a power expression (of X).
    //   2) to potentially shorten the critical path: after the transformation,
    //      the latency of the instruction Y is amortized by the expression of
    //      X*X, so Y is in a "less critical" position than it was before the
    //      transformation.
    if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
        Op1 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
    if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
        Op0 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
  }

  // log2(X * 0.5) * Y = log2(X) * Y - Y
  if (I.isFast()) {
    IntrinsicInst *Log2 = nullptr;
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
                       m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op0);
      Y = Op1;
    }
    if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
                       m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op1);
      Y = Op0;
    }
    if (Log2) {
      Log2->setArgOperand(0, X);
      Log2->copyFastMathFlags(&I);
      Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
      return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
    }
  }

  return nullptr;
}

/// Fold a divide or remainder with a select instruction divisor when one of the
/// select operands is zero. In that case, we can use the other select operand
/// because div/rem by zero is undefined.
bool InstCombiner::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
  SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
  if (!SI)
    return false;

  int NonNullOperand;
  if (match(SI->getTrueValue(), m_Zero()))
    // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
    NonNullOperand = 2;
  else if (match(SI->getFalseValue(), m_Zero()))
    // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
    NonNullOperand = 1;
  else
    return false;

  // Change the div/rem to use 'Y' instead of the select.
  I.setOperand(1, SI->getOperand(NonNullOperand));

  // Okay, we know we can replace the operand of the div/rem with 'Y' with no
  // problem. However, the select, or the condition of the select, may have
  // multiple uses.
  // Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and the condition only have a single use, don't bother with
  // this; just exit early.
  Value *SelectCond = SI->getCondition();
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
  Type *CondTy = SelectCond->getType();
  while (BBI != BBFront) {
    --BBI;
    // If we found an instruction that we can't assume will return, then
    // information from below it cannot be propagated above it.
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
         I != E; ++I) {
      if (*I == SI) {
        *I = SI->getOperand(NonNullOperand);
        Worklist.Add(&*BBI);
      } else if (*I == SelectCond) {
        *I = NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
                                 : ConstantInt::getFalse(CondTy);
        Worklist.Add(&*BBI);
      }
    }

    // If we passed the instruction, quit looking for it.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;

  }
  return true;
}

/// True if the multiply cannot be expressed in an int this size.
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
                              bool IsSigned) {
  bool Overflow;
  Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
  return Overflow;
}

/// True if C1 is a multiple of C2. Quotient contains C1/C2.
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
                       bool IsSigned) {
  assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");

  // Bail if we will divide by zero.
  if (C2.isNullValue())
    return false;

  // Bail if we would divide INT_MIN by -1.
  if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
    return false;

  APInt Remainder(C1.getBitWidth(), /*Val=*/0ULL, IsSigned);
  if (IsSigned)
    APInt::sdivrem(C1, C2, Quotient, Remainder);
  else
    APInt::udivrem(C1, C2, Quotient, Remainder);

  return Remainder.isMinValue();
}

/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// Common integer divide transforms
Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
    I.setOperand(1, V);
    return &I;
  }

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2  -> X / (C1*C2)
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         *C1 != C1->getBitWidth() - 1) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*Val=*/0ULL, IsSigned);
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if (!C2->isNullValue()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one; if Op1 is -1 then the result is minus one; otherwise
      // it's zero.
      Value *Inc = Builder.CreateAdd(Op1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

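  // For example (illustrative, signed case), the fold above turns
  // "sdiv i32 1, %x" into:
  //   %inc = add i32 %x, 1
  //   %cmp = icmp ult i32 %inc, 3
  //   %res = select i1 %cmp, i32 %x, i32 0
  // which is 1 when %x == 1, -1 when %x == -1, and 0 otherwise.
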
  // See if we can fold away this div instruction.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      I.setOperand(0, ConstantInt::get(Ty, 1));
      I.setOperand(1, Y);
      return &I;
    }
  }
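
  // For example (illustrative, unsigned case), the fold above rewrites
  // "udiv i32 %x, (mul nuw i32 %x, %y)" to "udiv i32 1, %y", which later
  // visits can simplify further.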

  return nullptr;
}

static const unsigned MaxDepth = 6;

namespace {

using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
                                           const BinaryOperator &I,
                                           InstCombiner &IC);

/// Used to maintain state for visitUDivOperand().
struct UDivFoldAction {
  /// Informs visitUDiv() how to fold this operand. This can be zero if this
  /// action joins two actions together.
  FoldUDivOperandCb FoldAction;

  /// Which operand to fold.
  Value *OperandToFold;

  union {
    /// The instruction returned when FoldAction is invoked.
    Instruction *FoldResult;

    /// Stores the LHS action index if this action joins two actions together.
    size_t SelectLHSIdx;
  };

  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
      : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
      : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};

} // end anonymous namespace

// X udiv 2^C -> X >> C
static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
                                    const BinaryOperator &I, InstCombiner &IC) {
  Constant *C1 = getLogBase2(Op0->getType(), cast<Constant>(Op1));
  if (!C1)
    llvm_unreachable("Failed to constant fold udiv -> logbase2");
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// X udiv (C1 << N), where C1 is "1<<C2"  -->  X >> (N+C2)
// X udiv (zext (C1 << N)), where C1 is "1<<C2"  -->  X >> (N+C2)
static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
                                InstCombiner &IC) {
  Value *ShiftLeft;
  if (!match(Op1, m_ZExt(m_Value(ShiftLeft))))
    ShiftLeft = Op1;

  Constant *CI;
  Value *N;
  if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N))))
    llvm_unreachable("match should never fail here!");
  Constant *Log2Base = getLogBase2(N->getType(), CI);
  if (!Log2Base)
    llvm_unreachable("getLogBase2 should never fail here!");
  N = IC.Builder.CreateAdd(N, Log2Base);
  if (Op1 != ShiftLeft)
    N = IC.Builder.CreateZExt(N, Op1->getType());
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// Recursively visits the possible right hand operands of a udiv
// instruction, seeing through select instructions, to determine if we can
// replace the udiv with something simpler. If we find that an operand is not
// able to simplify the udiv, we abort the entire transformation.
static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
                               SmallVectorImpl<UDivFoldAction> &Actions,
                               unsigned Depth = 0) {
  // Check to see if this is an unsigned division with an exact power of 2,
  // if so, convert to a right shift.
  if (match(Op1, m_Power2())) {
    Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1));
    return Actions.size();
  }

  // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
  if (match(Op1, m_Shl(m_Power2(), m_Value())) ||
      match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) {
    Actions.push_back(UDivFoldAction(foldUDivShl, Op1));
    return Actions.size();
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return 0;

  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
    if (size_t LHSIdx =
            visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth))
      if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) {
        Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1));
        return Actions.size();
      }

  return 0;
}

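// For example (illustrative IR only), the action machinery above turns
//   udiv i32 %x, (select i1 %c, i32 8, i32 16)
// into
//   %t = lshr i32 %x, 3
//   %f = lshr i32 %x, 4
//   %r = select i1 %c, i32 %t, i32 %f
// by folding each select arm separately and rejoining the results with a
// select.
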
/// If we have zero-extended operands of an unsigned div or rem, we may be able
/// to narrow the operation (sink the zext below the math).
static Instruction *narrowUDivURem(BinaryOperator &I,
                                   InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  Value *N = I.getOperand(0);
  Value *D = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
      X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
    // udiv (zext X), (zext Y) --> zext (udiv X, Y)
    // urem (zext X), (zext Y) --> zext (urem X, Y)
    Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
    return new ZExtInst(NarrowOp, Ty);
  }

  Constant *C;
  if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
      (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
    if (ConstantExpr::getZExt(TruncC, Ty) != C)
      return nullptr;

    // udiv (zext X), C --> zext (udiv X, C')
    // urem (zext X), C --> zext (urem X, C')
    // udiv C, (zext X) --> zext (udiv C', X)
    // urem C, (zext X) --> zext (urem C', X)
    Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
                                       : Builder.CreateBinOp(Opcode, TruncC, X);
    return new ZExtInst(NarrowOp, Ty);
  }

  return nullptr;
}

Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
  if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  const APInt *C1, *C2;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
    // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
    bool Overflow;
    APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
    if (!Overflow) {
      bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
      BinaryOperator *BO = BinaryOperator::CreateUDiv(
          X, ConstantInt::get(X->getType(), C2ShlC1));
      if (IsExact)
        BO->setIsExact();
      return BO;
    }
  }

  // Op0 / C where C is large (negative) --> zext (Op0 >= C)
  // TODO: Could use isKnownNegative() to handle non-constant values.
  Type *Ty = I.getType();
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }
  // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
    return NarrowDiv;

  // If the udiv operands are non-overflowing multiplies with a common operand,
  // then eliminate the common factor:
  // (A * B) / (A * X) --> B / X (and commuted variants)
  // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
  // TODO: If -reassociation handled this generally, we could remove this.
  Value *A, *B;
  if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
    if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
      return BinaryOperator::CreateUDiv(B, X);
    if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
      return BinaryOperator::CreateUDiv(A, X);
  }

  // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
  SmallVector<UDivFoldAction, 6> UDivActions;
  if (visitUDivOperand(Op0, Op1, I, UDivActions))
    for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) {
      FoldUDivOperandCb Action = UDivActions[i].FoldAction;
      Value *ActionOp1 = UDivActions[i].OperandToFold;
      Instruction *Inst;
      if (Action)
        Inst = Action(Op0, ActionOp1, I, *this);
      else {
        // This action joins two actions together. The RHS of this action is
        // simply the last action we processed; we saved the LHS action index
        // in the joining action.
        size_t SelectRHSIdx = i - 1;
        Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult;
        size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx;
        Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult;
        Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(),
                                  SelectLHS, SelectRHS);
      }

      // If this is the last action to process, return it to the InstCombiner.
      // Otherwise, we insert it before the UDiv and record it so that we may
      // use it as part of a joining action (i.e., a SelectInst).
      if (e - i != 1) {
        Inst->insertBefore(&I);
        UDivActions[i].FoldResult = Inst;
      } else
        return Inst;
    }

  return nullptr;
}

Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
  if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  // sdiv Op0, -1 --> -Op0
  // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
  if (match(Op1, m_AllOnes()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return BinaryOperator::CreateNeg(Op0);

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // sdiv exact X, C --> ashr exact X, log2(C)
    if (I.isExact() && Op1C->isNonNegative() && Op1C->isPowerOf2()) {
      Value *ShAmt = ConstantInt::get(Op1->getType(), Op1C->exactLogBase2());
      return BinaryOperator::CreateExactAShr(Op0, ShAmt, I.getName());
    }

    // If the dividend is sign-extended and the constant divisor is small enough
    // to fit in the source type, shrink the division to the narrower type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.
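      // For example (illustrative): "sdiv (sext i8 %x to i32), 3" shrinks to
      // "sext (sdiv i8 %x, 3) to i32" because 3 fits in i8.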

      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Op0->getType());
    }
  }

  if (Constant *RHS = dyn_cast<Constant>(Op1)) {
    // X/INT_MIN -> X == INT_MIN
    if (RHS->isMinSignedValue())
      return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType());

    // -X/C --> X/-C provided the negation doesn't overflow.
    Value *X;
    if (match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
      auto *BO = BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(RHS));
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
    if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
      // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}

/// Remove negation and try to convert division into multiplication.
static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(1), m_Constant(C)))
    return nullptr;

  // -X / C --> X / -C
  Value *X;
  if (match(I.getOperand(0), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);

  // If the constant divisor has an exact inverse, this is always safe. If not,
  // then we can still create a reciprocal if fast-math-flags allow it and the
  // constant is a regular number (not zero, infinite, or denormal).
  if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
    return nullptr;

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
  if (!RecipC->isNormalFP())
    return nullptr;

  // X / C --> X * (1 / C)
  return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
}

/// Remove negation and try to reassociate constant math.
static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(0), m_Constant(C)))
    return nullptr;

  // C / -X --> -C / X
  Value *X;
  if (match(I.getOperand(1), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);

  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;

  // Try to reassociate C / X expressions where X includes another constant.
  Constant *C2, *NewC = nullptr;
  if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
    // C / (X * C2) --> (C / C2) / X
    NewC = ConstantExpr::getFDiv(C, C2);
  } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
    // C / (X / C2) --> (C * C2) / X
    NewC = ConstantExpr::getFMul(C, C2);
  }
  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  if (!NewC || !NewC->isNormalFP())
    return nullptr;

  return BinaryOperator::CreateFDivFMF(NewC, X, &I);
}

Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
  if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) && hasUnaryFloatFn(&TLI, I.getType(), LibFunc_tan,
                                            LibFunc_tanf, LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs = CallSite(Op0).getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // -X / -Y -> X / Y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y)))) {
    I.setOperand(0, X);
    I.setOperand(1, Y);
    return &I;
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    I.setOperand(0, ConstantFP::get(I.getType(), 1.0));
    I.setOperand(1, Y);
    return &I;
  }

  return nullptr;
}

/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// Common integer remainder transforms
Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I)) {
    I.setOperand(1, V);
    return &I;
  }

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitURem(BinaryOperator &I) {
  if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    Constant *N1 = Constant::getAllOnesValue(Ty);
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }

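  // For example (illustrative): the power-of-two fold above turns
  // "urem i32 %x, 16" into "and i32 %x, 15".
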
  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One()))
    return CastInst::CreateZExtOrBitCast(Builder.CreateICmpNE(Op1, Op0), Ty);

  // X urem C -> X < C ? X : X - C, where C >= signbit.
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
    Value *Sub = Builder.CreateSub(Op0, Op1);
    return SelectInst::Create(Cmp, Op0, Sub);
  }

  // If the divisor is a sext of a boolean, then the divisor must be max
  // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
  // max unsigned value. In that case, the remainder is 0:
  // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
  }

  return nullptr;
}

Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
  if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  {
    const APInt *Y;
    // X % -Y -> X % Y
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue()) {
      Worklist.AddValue(I.getOperand(1));
      I.setOperand(1, ConstantInt::get(I.getType(), -*Y));
      return &I;
    }
  }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = C->getType()->getVectorNumElements();

    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }

      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C) { // Don't loop on -MININT
        Worklist.AddValue(I.getOperand(1));
        I.setOperand(1, NewRHSV);
        return &I;
      }
    }
  }

  return nullptr;
}

Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
  if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  return nullptr;
}