//===- InstCombineAddSub.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for add, fadd, sub, and fsub.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

namespace {

/// Class representing the coefficient of a floating-point addend.
/// This class needs to be highly efficient; this is especially true of
/// the constructor. As I write this comment, the cost of the default
/// constructor is merely a 4-byte zero store (assuming the compiler is
/// able to perform write-merging).
///
class FAddendCoef {
public:
  // The constructor has to initialize an APFloat, which is unnecessary for
  // most addends, whose coefficient is either 1 or -1. So the constructor
  // is expensive. In order to avoid the cost of the constructor, we should
  // reuse some instances whenever possible. The pre-created instances
  // FAddCombine::Add[0-5] embody this idea.
  FAddendCoef() = default;
  ~FAddendCoef();

  // If possible, don't define operator+/operator- etc because these
  // operators inevitably call FAddendCoef's constructor which is not cheap.
  void operator=(const FAddendCoef &A);
  void operator+=(const FAddendCoef &A);
  void operator*=(const FAddendCoef &S);

  void set(short C) {
    assert(!insaneIntVal(C) && "Insane coefficient");
    IsFp = false; IntVal = C;
  }

  void set(const APFloat& C);

  void negate();

  bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
  Value *getValue(Type *) const;

  bool isOne() const { return isInt() && IntVal == 1; }
  bool isTwo() const { return isInt() && IntVal == 2; }
  bool isMinusOne() const { return isInt() && IntVal == -1; }
  bool isMinusTwo() const { return isInt() && IntVal == -2; }

private:
  bool insaneIntVal(int V) { return V > 4 || V < -4; }

  APFloat *getFpValPtr()
    { return reinterpret_cast<APFloat *>(&FpValBuf.buffer[0]); }

  const APFloat *getFpValPtr() const
    { return reinterpret_cast<const APFloat *>(&FpValBuf.buffer[0]); }

  const APFloat &getFpVal() const {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  APFloat &getFpVal() {
    assert(IsFp && BufHasFpVal && "Incorrect state");
    return *getFpValPtr();
  }

  bool isInt() const { return !IsFp; }

  // If the coefficient is represented by an integer, promote it to a
  // floating point.
  void convertToFpType(const fltSemantics &Sem);

  // Construct an APFloat from a signed integer.
  // TODO: We should get rid of this function when APFloat can be constructed
  // from a *SIGNED* integer.
  APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

  bool IsFp = false;

  // True iff FpValBuf contains an instance of APFloat.
  bool BufHasFpVal = false;

  // The integer coefficient of an individual addend is either 1 or -1,
  // and we try to simplify at most 4 addends from at most two neighboring
  // instructions. So the range of <IntVal> falls in [-4, 4]. APInt is
  // overkill for this purpose.
  short IntVal = 0;

  AlignedCharArrayUnion<APFloat> FpValBuf;
};

/// FAddend is used to represent a floating-point addend. An addend is
/// represented as <C, V>, where V is a symbolic value, and C is a
/// constant coefficient. A constant addend is represented as <C, 0>.
class FAddend {
public:
  FAddend() = default;

  void operator+=(const FAddend &T) {
    assert((Val == T.Val) && "Symbolic-values disagree");
    Coeff += T.Coeff;
  }

  Value *getSymVal() const { return Val; }
  const FAddendCoef &getCoef() const { return Coeff; }

  bool isConstant() const { return Val == nullptr; }
  bool isZero() const { return Coeff.isZero(); }

  void set(short Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }
  void set(const APFloat &Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }
  void set(const ConstantFP *Coefficient, Value *V) {
    Coeff.set(Coefficient->getValueAPF());
    Val = V;
  }

  void negate() { Coeff.negate(); }

  /// Drill down the U-D chain one step to find the definition of V, and
  /// try to break the definition into one or two addends.
  static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

  /// Similar to FAddend::drillValueDownOneStep() except that the value
  /// being split is the addend itself.
  unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

private:
  void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

  // This addend has the value of "Coeff * Val".
  Value *Val = nullptr;
  FAddendCoef Coeff;
};
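// Illustrative note (not from the original source): with this representation,
// the expression "4.0 * x - y" breaks into the addends <4, x> and <-1, y>,
// while a lone constant such as 7.0 becomes the constant addend <7, nullptr>.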
/// FAddCombine is the class for optimizing an unsafe fadd/fsub along
/// with at most two of its neighboring instructions.
///
class FAddCombine {
public:
  FAddCombine(InstCombiner::BuilderTy &B) : Builder(B) {}

  Value *simplify(Instruction *FAdd);

private:
  using AddendVect = SmallVector<const FAddend *, 4>;

  Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

  /// Convert given addend to a Value
  Value *createAddendVal(const FAddend &A, bool& NeedNeg);

  /// Return the number of instructions needed to emit the N-ary addition.
  unsigned calcInstrNumber(const AddendVect& Vect);

  Value *createFSub(Value *Opnd0, Value *Opnd1);
  Value *createFAdd(Value *Opnd0, Value *Opnd1);
  Value *createFMul(Value *Opnd0, Value *Opnd1);
  Value *createFNeg(Value *V);
  Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
  void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

  // Debugging stuff is clustered here.
#ifndef NDEBUG
  unsigned CreateInstrNum;
  void initCreateInstNum() { CreateInstrNum = 0; }
  void incCreateInstNum() { CreateInstrNum++; }
#else
  void initCreateInstNum() {}
  void incCreateInstNum() {}
#endif

  InstCombiner::BuilderTy &Builder;
  Instruction *Instr = nullptr;
};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt()) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new(P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
  else {
    new(P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}

APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}

void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  enum APFloat::roundingMode RndMode = APFloat::rmNearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}

void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    assert(!insaneIntVal(Res) && "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
    isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();
  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}

Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}

// The definition of <Val>           Addends
// =========================================
//  A + B                     <1, A>, <1, B>
//  A - B                     <1, A>, <-1, B>
//  0 - B                     <-1, B>
//  C * A,                    <C, A>
//  A + C                     <1, A>, <C, NULL>
//  0 +/- 0                   <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}

// Try to break *this* addend into two addends. e.g. Suppose this addend is
// <2.3, V>, and V = X + Y; by calling this function, we obtain two addends,
// i.e. <2.3, X> and <2.3, Y>.
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}
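// Illustrative walk-through of FAddCombine::simplify() below (not from the
// original source; assumes 'reassoc nsz' fast-math flags, spelled 'fast'):
//   %t = fsub fast float %x, %y      ; addends <1, x>, <-1, y>
//   %r = fadd fast float %t, %y      ; drills down into %t
// The collected addends <1, x>, <-1, y>, <1, y> fold to the single addend
// <1, x>, so %r simplifies to %x.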
Value *FAddCombine::simplify(Instruction *I) {
  assert(I->hasAllowReassoc() && I->hasNoSignedZeros() &&
         "Expected 'reassoc'+'nsz' instruction");

  // Currently we are not able to handle vector type.
  if (I->getType()->isVectorTy())
    return nullptr;

  assert((I->getOpcode() == Instruction::FAdd ||
          I->getOpcode() == Instruction::FSub) && "Expect add/sub");

  // Save the instruction before calling other member-functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is : "I=0.0 +/- V". If the "V" were able to be
    // split into two addends, say "V = X - Y", the instruction would have
    // been optimized into "I = Y - X" in the previous steps.
    //
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  return nullptr;
}
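// A worked instance for simplifyFAdd() below (illustrative, not from the
// original source): given the addends <2, x>, <-1, y>, <3, x>, the two
// addends sharing the symbolic value x fold into <5, x>, and the result
// would be emitted as (x * 5) - y, subject to the instruction quota.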
Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  assert(AddendNum <= 4 && "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Points to the constant addend of the resulting simplified expression.
  // If the resulting expr has a constant addend, it is desirable for this
  // constant addend to reside at the top of the resulting expression tree.
  // Placing constants close to super-expr(s) will potentially reveal some
  // optimization opportunities in the super-expr(s).
  const FAddend *ConstAdd = nullptr;

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic-value at a time. Suppose the input
  // addends are : <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic-values will be processed in this order: x, y, z.
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();
    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing same symbolic-value, and these
    // addends will be later on folded into a single addend. Following above
    // example, if the symbolic value "y" is being processed, the inner loop
    // will collect two addends "<b1,y>" and "<b2,y>". These two addends will
    // be later on folded into "<b1+b2, y>".
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set null such that next iteration of the outer loop will not process
        // this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx ++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (Val) {
        if (!R.isZero()) {
          SimpVect.push_back(&R);
        }
      } else {
        // Don't push constant addend at this time. It will be the last element
        // of <SimpVect>.
        ConstAdd = &R;
      }
    }
  }

  assert((NextTmpIdx <= array_lengthof(TmpResult) + 1) &&
         "out-of-bound access");

  if (ConstAdd)
    SimpVect.push_back(ConstAdd);

  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}
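// Emission sketch for createNaryFAdd() below (illustrative): the addend set
// {<1, x>, <-1, y>, <-1, z>} is emitted pairwise as
//   %t = fsub float %x, %y
//   %r = fsub float %t, %z
// If every addend were negative, the final value would instead be finished
// off with a subtraction from zero via createFNeg().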
Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  assert(!Opnds.empty() && "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.

  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  initCreateInstNum();

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine:
  // the addition in question, and at most two neighboring instructions.
  // The resulting optimized addition should have at least one fewer
  // instruction than the original addition expression tree. This implies
  // that the resulting N-ary addition has at most two instructions, and we
  // don't need to worry about tree-height when constructing the N-ary
  // addition.

  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using adjacent two addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

#ifndef NDEBUG
  assert(CreateInstrNum == InstrNeeded &&
         "Inconsistent instruction numbers");
#endif

  return LastVal;
}

Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *Zero = cast<Value>(ConstantFP::getZeroValueForNegation(V->getType()));
  Value *NewV = createFSub(Zero, V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fneg's don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder.CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    incCreateInstNum();

  // Propagate fast-math flags
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}

// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // The number of addends in the form of "(-1)*x".
  unsigned NegOpndNum = 0;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    // The constant check above is really for a few special constant
    // coefficients.
    if (isa<UndefValue>(Opnd->getSymVal()))
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    if (CE.isMinusOne() || CE.isMinusTwo())
      NegOpndNum++;

    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  if (NegOpndNum == OpndNum)
    InstrNeeded++;
  return InstrNeeded;
}
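// Costing sketch for calcInstrNumber() above (illustrative): the addend set
// {<2, x>, <-1, y>} needs 2 instructions: OpndNum - 1 = 1 for the combining
// fsub, plus 1 because the coefficient 2 must be materialized as "fadd x, x".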
// Input Addend        Value           NeedNeg(output)
// ================================================================
//  Constant C         C               false
//  <+/-1, V>          V               coefficient is -1
//  <2/-2, V>          "fadd V, V"     coefficient is -2
//  <C, V>             "fmul V, C"     false
//
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}

// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns
//   ADD(XOR(OR(Z, NOT(C)), C), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD, we need at least one
  // of LHS or RHS to have one use to ensure benefit in transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // if ONE is on other side, swap
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // if XOR on other side, swap
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder.CreateAnd(Z, *C1);
        return Builder.CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder.CreateOr(Z, ~(*C1));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
    }
  }

  // Restore LHS and RHS
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // if XOR is on other side, swap
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // C1 is odd (so C2 = C1 - 1 is even)
  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countTrailingZeros() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder.CreateOr(Z, ~(*C2));
        return Builder.CreateSub(RHS, NewOr, "sub");
      }
  return nullptr;
}
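// Illustrative instance of the first pattern above (not from the original
// source), with C1 = 15 on i8, so NOT(C1) = -16:
//   %y = or i8 %z, -16       ; OR(Z, NOT(C1))
//   %x = xor i8 %y, 15       ; == NOT(AND(Z, C1))
//   %a = add i8 %x, 1        ; == NEG(AND(Z, C1))
//   %r = add i8 %a, %w
// is rewritten to:
//   %and = and i8 %z, 15
//   %r   = sub i8 %w, %and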
/// Wrapping flags may allow combining constants separated by an extend.
static Instruction *foldNoWrapAdd(BinaryOperator &Add,
                                  InstCombiner::BuilderTy &Builder) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Type *Ty = Add.getType();
  Constant *Op1C;
  if (!match(Op1, m_Constant(Op1C)))
    return nullptr;

  // Try this match first because it results in an add in the narrow type.
  // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1)))
  Value *X;
  const APInt *C1, *C2;
  if (match(Op1, m_APInt(C1)) &&
      match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) &&
      C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
    Constant *NewC =
        ConstantInt::get(X->getType(), *C2 + C1->trunc(C2->getBitWidth()));
    return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
  }

  // More general combining of constants in the wide type.
  // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
  Constant *NarrowC;
  if (match(Op0, m_OneUse(m_SExt(m_NSWAdd(m_Value(X), m_Constant(NarrowC)))))) {
    Constant *WideC = ConstantExpr::getSExt(NarrowC, Ty);
    Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
    Value *WideX = Builder.CreateSExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }
  // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
  if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_Constant(NarrowC)))))) {
    Constant *WideC = ConstantExpr::getZExt(NarrowC, Ty);
    Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
    Value *WideX = Builder.CreateZExt(X, Ty);
    return BinaryOperator::CreateAdd(WideX, NewC);
  }

  return nullptr;
}
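// Illustrative instance of the first fold above (values chosen for
// exposition, not from the original source):
//   %a = add nuw i8 %x, 10
//   %z = zext i8 %a to i32
//   %r = add i32 %z, -5
// The nuw flag guarantees the narrow add cannot wrap, so the -5 folds into
// the narrow constant:
//   %a = add nuw i8 %x, 5
//   %r = zext i8 %a to i32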
Instruction *InstCombiner::foldAddWithConstant(BinaryOperator &Add) {
  Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
  Constant *Op1C;
  if (!match(Op1, m_Constant(Op1C)))
    return nullptr;

  if (Instruction *NV = foldBinOpIntoSelectOrPhi(Add))
    return NV;

  Value *X, *Y;

  // add (sub X, Y), -1 --> add (not Y), X
  if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) &&
      match(Op1, m_AllOnes()))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Y), X);

  // zext(bool) + C -> bool ? C + 1 : C
  if (match(Op0, m_ZExt(m_Value(X))) &&
      X->getType()->getScalarSizeInBits() == 1)
    return SelectInst::Create(X, AddOne(Op1C), Op1);

  // ~X + C --> (C-1) - X
  if (match(Op0, m_Not(m_Value(X))))
    return BinaryOperator::CreateSub(SubOne(Op1C), X);

  const APInt *C;
  if (!match(Op1, m_APInt(C)))
    return nullptr;

  if (C->isSignMask()) {
    // If wrapping is not allowed, then the addition must set the sign bit:
    // X + (signmask) --> X | signmask
    if (Add.hasNoSignedWrap() || Add.hasNoUnsignedWrap())
      return BinaryOperator::CreateOr(Op0, Op1);

    // If wrapping is allowed, then the addition flips the sign bit of LHS:
    // X + (signmask) --> X ^ signmask
    return BinaryOperator::CreateXor(Op0, Op1);
  }

  // Is this add the last step in a convoluted sext?
  // add(zext(xor i16 X, -32768), -32768) --> sext X
  Type *Ty = Add.getType();
  const APInt *C2;
  if (match(Op0, m_ZExt(m_Xor(m_Value(X), m_APInt(C2)))) &&
      C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
    return CastInst::Create(Instruction::SExt, X, Ty);

  if (C->isOneValue() && Op0->hasOneUse()) {
    // add (sext i1 X), 1 --> zext (not X)
    // TODO: The smallest IR representation is (select X, 0, 1), and that would
    // not require the one-use check. But we need to remove a transform in
    // visitSelect and make sure that IR value tracking for select is equal or
    // better than for these ops.
    if (match(Op0, m_SExt(m_Value(X))) &&
        X->getType()->getScalarSizeInBits() == 1)
      return new ZExtInst(Builder.CreateNot(X), Ty);

    // Shifts and add used to flip and mask off the low bit:
    // add (ashr (shl i32 X, 31), 31), 1 --> and (not X), 1
    const APInt *C3;
    if (match(Op0, m_AShr(m_Shl(m_Value(X), m_APInt(C2)), m_APInt(C3))) &&
        C2 == C3 && *C2 == Ty->getScalarSizeInBits() - 1) {
      Value *NotX = Builder.CreateNot(X);
      return BinaryOperator::CreateAnd(NotX, ConstantInt::get(Ty, 1));
    }
  }

  return nullptr;
}

// Matches multiplication expression Op * C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns true if such a
// match is found.
static bool MatchMul(Value *E, Value *&Op, APInt &C) {
  const APInt *AI;
  if (match(E, m_Mul(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_Shl(m_Value(Op), m_APInt(AI)))) {
    C = APInt(AI->getBitWidth(), 1);
    C <<= *AI;
    return true;
  }
  return false;
}

// Matches remainder expression Op % C where C is a constant. Returns the
// constant value in C and the other operand in Op. Returns the signedness of
// the remainder operation in IsSigned. Returns true if such a match is
// found.
static bool MatchRem(Value *E, Value *&Op, APInt &C, bool &IsSigned) {
  const APInt *AI;
  IsSigned = false;
  if (match(E, m_SRem(m_Value(Op), m_APInt(AI)))) {
    IsSigned = true;
    C = *AI;
    return true;
  }
  if (match(E, m_URem(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (match(E, m_And(m_Value(Op), m_APInt(AI))) && (*AI + 1).isPowerOf2()) {
    C = *AI + 1;
    return true;
  }
  return false;
}

// Matches division expression Op / C with the given signedness as indicated
// by IsSigned, where C is a constant. Returns the constant value in C and the
// other operand in Op. Returns true if such a match is found.
static bool MatchDiv(Value *E, Value *&Op, APInt &C, bool IsSigned) {
  const APInt *AI;
  if (IsSigned && match(E, m_SDiv(m_Value(Op), m_APInt(AI)))) {
    C = *AI;
    return true;
  }
  if (!IsSigned) {
    if (match(E, m_UDiv(m_Value(Op), m_APInt(AI)))) {
      C = *AI;
      return true;
    }
    if (match(E, m_LShr(m_Value(Op), m_APInt(AI)))) {
      C = APInt(AI->getBitWidth(), 1);
      C <<= *AI;
      return true;
    }
  }
  return false;
}

// Returns whether C0 * C1 with the given signedness overflows.
static bool MulWillOverflow(APInt &C0, APInt &C1, bool IsSigned) {
  bool overflow;
  if (IsSigned)
    (void)C0.smul_ov(C1, overflow);
  else
    (void)C0.umul_ov(C1, overflow);
  return overflow;
}

// Simplifies X % C0 + (( X / C0 ) % C1) * C0 to X % (C0 * C1), where (C0 * C1)
// does not overflow.
Value *InstCombiner::SimplifyAddWithRemainder(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *X, *MulOpV;
  APInt C0, MulOpC;
  bool IsSigned;
  // Match I = X % C0 + MulOpV * C0
  if (((MatchRem(LHS, X, C0, IsSigned) && MatchMul(RHS, MulOpV, MulOpC)) ||
       (MatchRem(RHS, X, C0, IsSigned) && MatchMul(LHS, MulOpV, MulOpC))) &&
      C0 == MulOpC) {
    Value *RemOpV;
    APInt C1;
    bool Rem2IsSigned;
    // Match MulOpV = RemOpV % C1
    if (MatchRem(MulOpV, RemOpV, C1, Rem2IsSigned) &&
        IsSigned == Rem2IsSigned) {
      Value *DivOpV;
      APInt DivOpC;
      // Match RemOpV = X / C0
      if (MatchDiv(RemOpV, DivOpV, DivOpC, IsSigned) && X == DivOpV &&
          C0 == DivOpC && !MulWillOverflow(C0, C1, IsSigned)) {
        Value *NewDivisor =
            ConstantInt::get(X->getType()->getContext(), C0 * C1);
        return IsSigned ? Builder.CreateSRem(X, NewDivisor, "srem")
                        : Builder.CreateURem(X, NewDivisor, "urem");
      }
    }
  }

  return nullptr;
}
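// Illustrative instance of the fold above (unsigned case, not from the
// original source):
//   %r = urem i32 %x, 4
//   %d = udiv i32 %x, 4
//   %m = urem i32 %d, 8        ; (x / 4) % 8
//   %s = mul i32 %m, 4
//   %a = add i32 %r, %s        ; x % 4 + ((x / 4) % 8) * 4
// 4 * 8 = 32 does not overflow, so %a becomes: urem i32 %x, 32.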
/// Fold
///   (1 << NBits) - 1
/// Into:
///   ~(-(1 << NBits))
/// Because a 'not' is better for bit-tracking analysis and other transforms
/// than an 'add'. The new shl is always nsw, and is nuw if old `add` was.
static Instruction *canonicalizeLowbitMask(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {
  Value *NBits;
  if (!match(&I, m_Add(m_OneUse(m_Shl(m_One(), m_Value(NBits))), m_AllOnes())))
    return nullptr;

  Constant *MinusOne = Constant::getAllOnesValue(NBits->getType());
  Value *NotMask = Builder.CreateShl(MinusOne, NBits, "notmask");
  // Be wary of constant folding.
  if (auto *BOp = dyn_cast<BinaryOperator>(NotMask)) {
    // Always NSW. But NUW propagates from `add`.
    BOp->setHasNoSignedWrap();
    BOp->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
  }

  return BinaryOperator::CreateNot(NotMask, I.getName());
}

static Instruction *foldToUnsignedSaturatedAdd(BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::Add && "Expecting add instruction");
  Type *Ty = I.getType();
  auto getUAddSat = [&]() {
    return Intrinsic::getDeclaration(I.getModule(), Intrinsic::uadd_sat, Ty);
  };

  // add (umin X, ~Y), Y --> uaddsat X, Y
  Value *X, *Y;
  if (match(&I, m_c_Add(m_c_UMin(m_Value(X), m_Not(m_Value(Y))),
                        m_Deferred(Y))))
    return CallInst::Create(getUAddSat(), { X, Y });

  // add (umin X, ~C), C --> uaddsat X, C
  const APInt *C, *NotC;
  if (match(&I, m_Add(m_UMin(m_Value(X), m_APInt(NotC)), m_APInt(C))) &&
      *C == ~*NotC)
    return CallInst::Create(getUAddSat(), { X, ConstantInt::get(Ty, *C) });

  return nullptr;
}
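// Illustrative instance of the first pattern above, written with the
// icmp+select form of umin that the matcher recognizes (not from the
// original source):
//   %not = xor i32 %y, -1
//   %cmp = icmp ult i32 %x, %not
//   %min = select i1 %cmp, i32 %x, i32 %not   ; umin(X, ~Y)
//   %r   = add i32 %min, %y
// becomes:
//   %r = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)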
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
  if (Value *V = SimplifyAddInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldAddWithConstant(I))
    return X;

  if (Instruction *X = foldNoWrapAdd(I, Builder))
    return X;

  // FIXME: This should be moved into the above helper function to allow these
  // transforms for general constant or constant splat vectors.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Type *Ty = I.getType();
  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
    Value *XorLHS = nullptr; ConstantInt *XorRHS = nullptr;
    if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
      unsigned TySizeBits = Ty->getScalarSizeInBits();
      const APInt &RHSVal = CI->getValue();
      unsigned ExtendAmt = 0;
      // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
      // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
      if (XorRHS->getValue() == -RHSVal) {
        if (RHSVal.isPowerOf2())
          ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
        else if (XorRHS->getValue().isPowerOf2())
          ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
      }

      if (ExtendAmt) {
        APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
        if (!MaskedValueIsZero(XorLHS, Mask, 0, &I))
          ExtendAmt = 0;
      }

      if (ExtendAmt) {
        Constant *ShAmt = ConstantInt::get(Ty, ExtendAmt);
        Value *NewShl = Builder.CreateShl(XorLHS, ShAmt, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmt);
      }

      // If this is a xor that was canonicalized from a sub, turn it back into
      // a sub and fuse this add with it.
      if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
        KnownBits LHSKnown = computeKnownBits(XorLHS, 0, &I);
        if ((XorRHS->getValue() | LHSKnown.Zero).isAllOnesValue())
          return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
                                           XorLHS);
      }
      // (X + signmask) + C could have gotten canonicalized to
      // (X ^ signmask) + C; transform them into (X + (signmask ^ C)).
      if (XorRHS->getValue().isSignMask())
        return BinaryOperator::CreateAdd(XorLHS,
                                         ConstantExpr::getXor(XorRHS, CI));
    }
  }

  if (Ty->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    auto *Shl = BinaryOperator::CreateShl(LHS, ConstantInt::get(Ty, 1));
    Shl->setHasNoSignedWrap(I.hasNoSignedWrap());
    Shl->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Shl;
  }

  Value *A, *B;
  if (match(LHS, m_Neg(m_Value(A)))) {
    // -A + -B --> -(A + B)
    if (match(RHS, m_Neg(m_Value(B))))
      return BinaryOperator::CreateNeg(Builder.CreateAdd(A, B));

    // -A + B --> B - A
    return BinaryOperator::CreateSub(RHS, A);
  }

  // Canonicalize sext to zext for better value tracking potential.
  // add A, sext(B) --> sub A, zext(B)
  if (match(&I, m_c_Add(m_Value(A), m_OneUse(m_SExt(m_Value(B))))) &&
      B->getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateSub(A, Builder.CreateZExt(B, Ty));

  // A + -B --> A - B
  if (match(RHS, m_Neg(m_Value(B))))
    return BinaryOperator::CreateSub(LHS, B);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // (A + 1) + ~B --> A - B
  // ~B + (A + 1) --> A - B
  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))))
    return BinaryOperator::CreateSub(A, B);

  // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
  if (Value *V = SimplifyAddWithRemainder(I)) return replaceInstUsesWith(I, V);

  // A+B --> A|B iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);

  // FIXME: We already did a check for ConstantInt RHS above this.
  // FIXME: Is this pattern covered by another fold? No regression tests fail on
  // removal.
  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
    // (X & FF00) + xx00  -> (X+xx00) & FF00
    Value *X;
    ConstantInt *C2;
    if (LHS->hasOneUse() &&
        match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
        CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
      // See if all bits from the first bit set in the Add RHS up are included
      // in the mask. First, get the rightmost bit.
      const APInt &AddRHSV = CRHS->getValue();

      // Form a mask of all bits from the lowest bit added through the top.
      APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));

      // See if the and mask includes all of these bits.
      APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());

      if (AddRHSHighBits == AddRHSHighBitsAnd) {
        // Okay, the xform is safe. Insert the new add pronto.
        Value *NewAdd = Builder.CreateAdd(X, CRHS, LHS->getName());
        return BinaryOperator::CreateAnd(NewAdd, C2);
      }
    }
  }
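  // Illustrative instance of the fold above on i16 (not from the original
  // source): the mask 0xFF00 covers every bit from the add constant 0x1000
  // upward, so
  //   %m = and i16 %x, -256       ; 0xFF00
  //   %a = add i16 %m, 4096       ; 0x1000
  // becomes:
  //   %a2 = add i16 %x, 4096
  //   %r  = and i16 %a2, -256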
  // add (select X 0 (sub n A)) A  -->  select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }
    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);

      if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  // (add (xor A, B) (and A, B)) --> (or A, B)
  // (add (and A, B) (xor A, B)) --> (or A, B)
  if (match(&I, m_c_BinOp(m_Xor(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B)))))
    return BinaryOperator::CreateOr(A, B);

  // (add (or A, B) (and A, B)) --> (add A, B)
  // (add (and A, B) (or A, B)) --> (add A, B)
  if (match(&I, m_c_BinOp(m_Or(m_Value(A), m_Value(B)),
                          m_c_And(m_Deferred(A), m_Deferred(B))))) {
    I.setOperand(0, A);
    I.setOperand(1, B);
    return &I;
  }

  // TODO(jingyue): Consider willNotOverflowSignedAdd and
  // willNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  if (Instruction *V = canonicalizeLowbitMask(I, Builder))
    return V;

  if (Instruction *SatAdd = foldToUnsignedSaturatedAdd(I))
    return SatAdd;

  return Changed ? &I : nullptr;
}

/// Factor a common operand out of fadd/fsub of fmul/fdiv.
static Instruction *factorizeFAddFSub(BinaryOperator &I,
                                      InstCombiner::BuilderTy &Builder) {
  assert((I.getOpcode() == Instruction::FAdd ||
          I.getOpcode() == Instruction::FSub) && "Expecting fadd/fsub");
  assert(I.hasAllowReassoc() && I.hasNoSignedZeros() &&
         "FP factorization requires FMF");
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y, *Z;
  bool IsFMul;
  if ((match(Op0, m_OneUse(m_FMul(m_Value(X), m_Value(Z)))) &&
       match(Op1, m_OneUse(m_c_FMul(m_Value(Y), m_Specific(Z))))) ||
      (match(Op0, m_OneUse(m_FMul(m_Value(Z), m_Value(X)))) &&
       match(Op1, m_OneUse(m_c_FMul(m_Value(Y), m_Specific(Z))))))
    IsFMul = true;
  else if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Z)))) &&
           match(Op1, m_OneUse(m_FDiv(m_Value(Y), m_Specific(Z)))))
    IsFMul = false;
  else
    return nullptr;

  // (X * Z) + (Y * Z) --> (X + Y) * Z
  // (X * Z) - (Y * Z) --> (X - Y) * Z
  // (X / Z) + (Y / Z) --> (X + Y) / Z
  // (X / Z) - (Y / Z) --> (X - Y) / Z
  bool IsFAdd = I.getOpcode() == Instruction::FAdd;
  Value *XY = IsFAdd ? Builder.CreateFAddFMF(X, Y, &I)
                     : Builder.CreateFSubFMF(X, Y, &I);

  // Bail out if we just created a denormal constant.
  // TODO: This is copied from a previous implementation. Is it necessary?
  const APFloat *C;
  if (match(XY, m_APFloat(C)) && !C->isNormal())
    return nullptr;

  return IsFMul ? BinaryOperator::CreateFMulFMF(XY, Z, &I)
                : BinaryOperator::CreateFDivFMF(XY, Z, &I);
}
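// Illustrative instance of the fmul case above (assumes 'reassoc nsz',
// spelled 'fast' here; not from the original source):
//   %xz = fmul fast float %x, %z
//   %yz = fmul fast float %y, %z
//   %r  = fadd fast float %xz, %yz
// becomes:
//   %xy = fadd fast float %x, %y
//   %r  = fmul fast float %xy, %z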
Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
  if (Value *V = SimplifyFAddInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *FoldedFAdd = foldBinOpIntoSelectOrPhi(I))
    return FoldedFAdd;

  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *X;
  // (-X) + Y --> Y - X
  if (match(LHS, m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFSubFMF(RHS, X, &I);
  // Y + (-X) --> Y - X
  if (match(RHS, m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFSubFMF(LHS, X, &I);

  // Check for (fadd double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    Value *LHSIntVal = LHSConv->getOperand(0);
    Type *FPType = LHSConv->getType();

    // TODO: This check is overly conservative. In many cases known bits
    // analysis can tell us that the result of the addition has less
    // significant bits than the integer type can hold.
    auto IsValidPromotion = [](Type *FTy, Type *ITy) {
      Type *FScalarTy = FTy->getScalarType();
      Type *IScalarTy = ITy->getScalarType();

      // Do we have enough bits in the significand to represent the result of
      // the integer addition?
      unsigned MaxRepresentableBits =
          APFloat::semanticsPrecision(FScalarTy->getFltSemantics());
      return IScalarTy->getIntegerBitWidth() <= MaxRepresentableBits;
    };

    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value. This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // instcombined.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        Constant *CI =
          ConstantExpr::getFPToSI(CFP, LHSIntVal->getType());
        if (LHSConv->hasOneUse() &&
            ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
            willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }

    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      Value *RHSIntVal = RHSConv->getOperand(0);
      // It's enough to check LHS types only because we require int types to
      // be the same for this transform.
      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
        // Only do this if x/y have the same type, if at least one of them has
        // a single use (so we don't increase the number of int->fp
        // conversions), and if the integer add will not overflow.
        if (LHSIntVal->getType() == RHSIntVal->getType() &&
            (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
            willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
          // Insert the new integer add.
          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal,
                                               "addconv");
          return new SIToFPInst(NewAdd, I.getType());
        }
      }
    }
  }

  // Handle special cases for FAdd with selects feeding the operation
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}
/// Optimize differences of pointers into the same array into a size. Consider:
///  &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
                                               Type *Ty) {
  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  bool Swapped = false;
  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;

  // For now we require one side to be the base pointer "A" or a constant
  // GEP derived from it.
  if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0) == RHS) {
      GEP1 = LHSGEP;
      Swapped = false;
    } else if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
          RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = RHSGEP;
        GEP1 = LHSGEP;
        Swapped = false;
      }
    }
  }

  if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
    // X - (gep X, ...)
    if (RHSGEP->getOperand(0) == LHS) {
      GEP1 = RHSGEP;
      Swapped = true;
    } else if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
      // (gep X, ...) - (gep X, ...)
      if (RHSGEP->getOperand(0)->stripPointerCasts() ==
          LHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = LHSGEP;
        GEP1 = RHSGEP;
        Swapped = true;
      }
    }
  }

  if (!GEP1)
    // No GEP found.
    return nullptr;

  if (GEP2) {
    // (gep X, ...) - (gep X, ...)
    //
    // Avoid duplicating the arithmetic if there is more than one non-constant
    // index between the two GEPs and either GEP has a non-constant index and
    // multiple users. If there are zero non-constant indices, the result is a
    // constant and there is no duplication. If there is one non-constant
    // index, the result is an add or sub with a constant, which is no larger
    // than the original code, and there's no duplicated arithmetic, even if
    // either GEP has multiple users. If more than one non-constant index is
    // combined, as long as the GEP with at least one non-constant index
    // doesn't have multiple users, there is no duplication.
    unsigned NumNonConstantIndices1 = GEP1->countNonConstantIndices();
    unsigned NumNonConstantIndices2 = GEP2->countNonConstantIndices();
    if (NumNonConstantIndices1 + NumNonConstantIndices2 > 1 &&
        ((NumNonConstantIndices1 > 0 && !GEP1->hasOneUse()) ||
         (NumNonConstantIndices2 > 0 && !GEP2->hasOneUse()))) {
      return nullptr;
    }
  }

  // Emit the offset of the GEP as an intptr_t.
  Value *Result = EmitGEPOffset(GEP1);

  // If we had a constant expression GEP on the other side offsetting the
  // pointer, subtract it from the offset we have.
  if (GEP2) {
    Value *Offset = EmitGEPOffset(GEP2);
    Result = Builder.CreateSub(Result, Offset);
  }

  // If we have p - gep(p, ...) then we have to negate the result.
  if (Swapped)
    Result = Builder.CreateNeg(Result, "diff.neg");

  return Builder.CreateIntCast(Result, Ty, true);
}
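// Illustrative (gep X, ...) - X instance (not from the original source):
//   %p   = getelementptr inbounds i32, i32* %a, i64 %i
//   %lhs = ptrtoint i32* %p to i64
//   %rhs = ptrtoint i32* %a to i64
//   %d   = sub i64 %lhs, %rhs
// EmitGEPOffset reduces %d to the byte offset of the GEP, e.g.
//   %d = mul i64 %i, 4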
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  if (Value *V = SimplifySubInst(I.getOperand(0), I.getOperand(1),
                                 I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // (A*B)-(A*C) -> A*(B-C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // If this is a 'B = x-(-A)', change to B = x+A.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
      assert(BO->getOpcode() == Instruction::Sub &&
             "Expected a subtraction operator!");
      if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    }

    return Res;
  }

  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);

  // Replace (-1 - A) with (~A).
  if (match(Op0, m_AllOnes()))
    return BinaryOperator::CreateNot(Op1);

  // (~X) - (~Y) --> Y - X
  Value *X, *Y;
  if (match(Op0, m_Not(m_Value(X))) && match(Op1, m_Not(m_Value(Y))))
    return BinaryOperator::CreateSub(Y, X);

  // (X + -1) - Y --> ~Y + X
  if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
    return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);

  // Y - (X + 1) --> ~X + Y
  if (match(Op1, m_OneUse(m_Add(m_Value(X), m_One()))))
    return BinaryOperator::CreateAdd(Builder.CreateNot(X), Op0);

  if (Constant *C = dyn_cast<Constant>(Op0)) {
    bool IsNegate = match(C, m_ZeroInt());
    Value *X;
    if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
      // 0 - (zext bool) --> sext bool
      // C - (zext bool) --> bool ? C - 1 : C
      if (IsNegate)
        return CastInst::CreateSExtOrBitCast(X, I.getType());
      return SelectInst::Create(X, SubOne(C), C);
    }
    if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
      // 0 - (sext bool) --> zext bool
      // C - (sext bool) --> bool ? C + 1 : C
      if (IsNegate)
        return CastInst::CreateZExtOrBitCast(X, I.getType());
      return SelectInst::Create(X, AddOne(C), C);
    }

    // C - ~X == X + (1+C)
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, AddOne(C));

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    // Try to fold constant sub into PHI values.
    if (PHINode *PN = dyn_cast<PHINode>(Op1))
      if (Instruction *R = foldOpIntoPhi(I, PN))
        return R;

    // C-(X+C2) --> (C-C2)-X
    Constant *C2;
    if (match(Op1, m_Add(m_Value(X), m_Constant(C2))))
      return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);
  }
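  // Small worked instances of the constant folds above (illustrative):
  //   5 - (zext i1 %b)  -->  select %b, 4, 5
  //   5 - (sext i1 %b)  -->  select %b, 6, 5
  //   5 - ~%x           -->  %x + 6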
  const APInt *Op0C;
  if (match(Op0, m_APInt(Op0C))) {
    unsigned BitWidth = I.getType()->getScalarSizeInBits();

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (Op0C->isNullValue()) {
      Value *X;
      const APInt *ShAmt;
      if (match(Op1, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateAShr(X, ShAmtOp);
      }
      if (match(Op1, m_AShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateLShr(X, ShAmtOp);
      }

      if (Op1->hasOneUse()) {
        Value *LHS, *RHS;
        SelectPatternFlavor SPF = matchSelectPattern(Op1, LHS, RHS).Flavor;
        if (SPF == SPF_ABS || SPF == SPF_NABS) {
          // This is a negate of an ABS/NABS pattern. Just swap the operands
          // of the select.
          SelectInst *SI = cast<SelectInst>(Op1);
          Value *TrueVal = SI->getTrueValue();
          Value *FalseVal = SI->getFalseValue();
          SI->setTrueValue(FalseVal);
          SI->setFalseValue(TrueVal);
          // Don't swap prof metadata, we didn't change the branch behavior.
          return replaceInstUsesWith(I, SI);
        }
      }
    }

    // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
    // zero.
    if (Op0C->isMask()) {
      KnownBits RHSKnown = computeKnownBits(Op1, 0, &I);
      if ((*Op0C | RHSKnown.Zero).isAllOnesValue())
        return BinaryOperator::CreateXor(Op1, Op0);
    }
  }

  {
    Value *Y;
    // X-(X+Y) == -Y    X-(Y+X) == -Y
    if (match(Op1, m_c_Add(m_Specific(Op0), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);

    // (X-Y)-X == -Y
    if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);
  }

  // (sub (or A, B), (xor A, B)) --> (and A, B)
  {
    Value *A, *B;
    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  {
    Value *Y;
    // ((X | Y) - X) --> (~X & Y)
    if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
      return BinaryOperator::CreateAnd(
          Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
  }
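  // Illustrative instance of the sign-shift negation above (i32, not from
  // the original source):
  //   %s = lshr i32 %x, 31
  //   %r = sub i32 0, %s
  // becomes:
  //   %r = ashr i32 %x, 31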
  if (Op1->hasOneUse()) {
    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
    Constant *C = nullptr;

    // (X - (Y - Z))  -->  (X + (Z - Y)).
    if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
      return BinaryOperator::CreateAdd(Op0,
                                      Builder.CreateSub(Z, Y, Op1->getName()));

    // (X - (X & Y))   -->   (X & ~Y)
    if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0))))
      return BinaryOperator::CreateAnd(Op0,
                                  Builder.CreateNot(Y, Y->getName() + ".not"));

    // 0 - (X sdiv C)  -> (X sdiv -C)  provided the negation doesn't overflow.
    if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
        C->isNotMinSignedValue() && !C->isOneValue())
      return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));

    // 0 - (X << Y)  -> (-X << Y)   when X is freely negatable.
    if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
      if (Value *XNeg = dyn_castNegVal(X))
        return BinaryOperator::CreateShl(XNeg, Y);

    // Subtracting -1/0 is the same as adding 1/0:
    // sub [nsw] Op0, sext(bool Y) -> add [nsw] Op0, zext(bool Y)
    // 'nuw' is dropped in favor of the canonical form.
    if (match(Op1, m_SExt(m_Value(Y))) &&
        Y->getType()->getScalarSizeInBits() == 1) {
      Value *Zext = Builder.CreateZExt(Y, I.getType());
      BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext);
      Add->setHasNoSignedWrap(I.hasNoSignedWrap());
      return Add;
    }

    // X - A*-B -> X + A*B
    // X - -A*B -> X + A*B
    Value *A, *B;
    if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B)))))
      return BinaryOperator::CreateAdd(Op0, Builder.CreateMul(A, B));

    // X - A*C -> X + A*-C
    // No need to handle commuted multiply because multiply handling will
    // ensure the constant will be moved to the right-hand side.
    if (match(Op1, m_Mul(m_Value(A), m_Constant(C))) && !isa<ConstantExpr>(C)) {
      Value *NewMul = Builder.CreateMul(A, ConstantExpr::getNeg(C));
      return BinaryOperator::CreateAdd(Op0, NewMul);
    }
  }

  {
    // ~A - Min/Max(~A, O) -> Max/Min(A, ~O) - A
    // ~A - Min/Max(O, ~A) -> Max/Min(A, ~O) - A
    // Min/Max(~A, O) - ~A -> A - Max/Min(A, ~O)
    // Min/Max(O, ~A) - ~A -> A - Max/Min(A, ~O)
    // So long as O here is freely invertible, this will be neutral or a win.
    Value *LHS, *RHS, *A;
    Value *NotA = Op0, *MinMax = Op1;
    SelectPatternFlavor SPF = matchSelectPattern(MinMax, LHS, RHS).Flavor;
    if (!SelectPatternResult::isMinOrMax(SPF)) {
      NotA = Op1;
      MinMax = Op0;
      SPF = matchSelectPattern(MinMax, LHS, RHS).Flavor;
    }
    if (SelectPatternResult::isMinOrMax(SPF) &&
        match(NotA, m_Not(m_Value(A))) && (NotA == LHS || NotA == RHS)) {
      if (NotA == LHS)
        std::swap(LHS, RHS);
      // LHS is now O above and expected to have at least 2 uses (the min/max)
      // NotA is expected to have 2 uses from the min/max and 1 from the sub.
      if (IsFreeToInvert(LHS, !LHS->hasNUsesOrMore(3)) &&
          !NotA->hasNUsesOrMore(4)) {
        // Note: We don't generate the inverse max/min, just create the not of
        // it and let other folds do the rest.
        Value *Not = Builder.CreateNot(MinMax);
        if (NotA == Op0)
          return BinaryOperator::CreateSub(Not, A);
        else
          return BinaryOperator::CreateSub(A, Not);
      }
    }
  }
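  // Illustrative instance of the first min/max pattern above, with a umin in
  // icmp+select form and an invertible O (a constant, not from the original
  // source):
  //   %na  = xor i32 %a, -1                    ; ~A
  //   %cmp = icmp ult i32 %na, 42
  //   %min = select i1 %cmp, i32 %na, i32 42   ; umin(~A, 42)
  //   %r   = sub i32 %na, %min
  // becomes (the 'not' of the min folds to umax(A, ~42) later):
  //   %not = xor i32 %min, -1
  //   %r   = sub i32 %not, %a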
  {
    Value *A, *B;
    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);
  }

  {
    Value *Y;
    // ((X | Y) - X) --> (~X & Y)
    if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
      return BinaryOperator::CreateAnd(
          Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
  }

  if (Op1->hasOneUse()) {
    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
    Constant *C = nullptr;

    // (X - (Y - Z)) --> (X + (Z - Y)).
    if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
      return BinaryOperator::CreateAdd(Op0,
                                       Builder.CreateSub(Z, Y, Op1->getName()));

    // (X - (X & Y)) --> (X & ~Y)
    if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0))))
      return BinaryOperator::CreateAnd(
          Op0, Builder.CreateNot(Y, Y->getName() + ".not"));

    // 0 - (X sdiv C) -> (X sdiv -C), provided the negation doesn't overflow.
    if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
        C->isNotMinSignedValue() && !C->isOneValue())
      return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));

    // 0 - (X << Y) -> (-X << Y), when X is freely negatable.
    if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
      if (Value *XNeg = dyn_castNegVal(X))
        return BinaryOperator::CreateShl(XNeg, Y);

    // Subtracting -1/0 is the same as adding 1/0:
    // sub [nsw] Op0, sext(bool Y) -> add [nsw] Op0, zext(bool Y)
    // 'nuw' is dropped in favor of the canonical form.
    if (match(Op1, m_SExt(m_Value(Y))) &&
        Y->getType()->getScalarSizeInBits() == 1) {
      Value *Zext = Builder.CreateZExt(Y, I.getType());
      BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext);
      Add->setHasNoSignedWrap(I.hasNoSignedWrap());
      return Add;
    }

    // X - A*-B -> X + A*B
    // X - -A*B -> X + A*B
    Value *A, *B;
    if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B)))))
      return BinaryOperator::CreateAdd(Op0, Builder.CreateMul(A, B));

    // X - A*C -> X + A*-C
    // No need to handle the commuted multiply because multiply handling will
    // ensure the constant is moved to the right-hand side.
    if (match(Op1, m_Mul(m_Value(A), m_Constant(C))) && !isa<ConstantExpr>(C)) {
      Value *NewMul = Builder.CreateMul(A, ConstantExpr::getNeg(C));
      return BinaryOperator::CreateAdd(Op0, NewMul);
    }
  }

  {
    // ~A - Min/Max(~A, O) -> Max/Min(A, ~O) - A
    // ~A - Min/Max(O, ~A) -> Max/Min(A, ~O) - A
    // Min/Max(~A, O) - ~A -> A - Max/Min(A, ~O)
    // Min/Max(O, ~A) - ~A -> A - Max/Min(A, ~O)
    // So long as O here is freely invertible, this will be neutral or a win.
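    // (These identities hold because bitwise-not reverses the order of both
    // signed and unsigned comparisons, so Min/Max(~A, O) == ~Max/Min(A, ~O),
    // and ~X == -1 - X, so the -1 terms cancel in the subtraction.)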
    Value *LHS, *RHS, *A;
    Value *NotA = Op0, *MinMax = Op1;
    SelectPatternFlavor SPF = matchSelectPattern(MinMax, LHS, RHS).Flavor;
    if (!SelectPatternResult::isMinOrMax(SPF)) {
      NotA = Op1;
      MinMax = Op0;
      SPF = matchSelectPattern(MinMax, LHS, RHS).Flavor;
    }
    if (SelectPatternResult::isMinOrMax(SPF) &&
        match(NotA, m_Not(m_Value(A))) && (NotA == LHS || NotA == RHS)) {
      if (NotA == LHS)
        std::swap(LHS, RHS);
      // LHS is now O above and expected to have at least 2 uses (the min/max).
      // NotA is expected to have 2 uses from the min/max and 1 from the sub.
      if (IsFreeToInvert(LHS, !LHS->hasNUsesOrMore(3)) &&
          !NotA->hasNUsesOrMore(4)) {
        // Note: We don't generate the inverse max/min, just create the not of
        // it and let other folds do the rest.
        Value *Not = Builder.CreateNot(MinMax);
        if (NotA == Op0)
          return BinaryOperator::CreateSub(Not, A);
        else
          return BinaryOperator::CreateSub(A, Not);
      }
    }
  }

  // Optimize differences of pointers into the same array into a size. Consider:
  //   &A[10] - &A[0]: we should compile this to "10".
  Value *LHSOp, *RHSOp;
  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
      match(Op1, m_PtrToInt(m_Value(RHSOp))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  // trunc(p)-trunc(q) -> trunc(p-q)
  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
      match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  // Canonicalize a shifty way to code absolute value to the common pattern.
  // There are 2 potential commuted variants.
  // We're relying on the fact that we only do this transform when the shift has
  // exactly 2 uses and the xor has exactly 1 use (otherwise, we might increase
  // instructions).
  Value *A;
  const APInt *ShAmt;
  Type *Ty = I.getType();
  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
      Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
      match(Op0, m_OneUse(m_c_Xor(m_Specific(A), m_Specific(Op1))))) {
    // B = ashr i32 A, 31  ; smear the sign bit
    // sub (xor A, B), B   ; flip bits if negative and subtract -1 (add 1)
    // --> (A < 0) ? -A : A
    Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty));
    // Copy the nuw/nsw flags from the sub to the negate.
    Value *Neg = Builder.CreateNeg(A, "", I.hasNoUnsignedWrap(),
                                   I.hasNoSignedWrap());
    return SelectInst::Create(Cmp, Neg, A);
  }
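
  // Why the shift/xor form computes abs: when A is negative, B is all ones,
  // so (A ^ B) - B == ~A - (-1) == ~A + 1 == -A; when A is non-negative,
  // B is zero and the expression is just A.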

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  if (Value *V = SimplifyFSubInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Subtraction from -0.0 is the canonical form of fneg.
  // fsub nsz 0, X ==> fsub nsz -0.0, X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (I.hasNoSignedZeros() && match(Op0, m_PosZeroFP()))
    return BinaryOperator::CreateFNegFMF(Op1, &I);

  Value *X, *Y;
  Constant *C;

  // Fold negation into constant operand. This is limited with one-use because
  // fneg is assumed better for analysis and cheaper in codegen than fmul/fdiv.
  // -(X * C) --> X * (-C)
  if (match(&I, m_FNeg(m_OneUse(m_FMul(m_Value(X), m_Constant(C))))))
    return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);
  // -(X / C) --> X / (-C)
  if (match(&I, m_FNeg(m_OneUse(m_FDiv(m_Value(X), m_Constant(C))))))
    return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);
  // -(C / X) --> (-C) / X
  if (match(&I, m_FNeg(m_OneUse(m_FDiv(m_Constant(C), m_Value(X))))))
    return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);

  // If Op0 is not -0.0 or we can ignore -0.0: Z - (X - Y) --> Z + (Y - X)
  // Canonicalize to fadd to make analysis easier.
  // This can also help codegen because fadd is commutative.
  // Note that if this fsub was really an fneg, the fadd with -0.0 will get
  // killed later. We still limit that particular transform with 'hasOneUse'
  // because an fneg is assumed better/cheaper than a generic fsub.
  if (I.hasNoSignedZeros() || CannotBeNegativeZero(Op0, SQ.TLI)) {
    if (match(Op1, m_OneUse(m_FSub(m_Value(X), m_Value(Y))))) {
      Value *NewSub = Builder.CreateFSubFMF(Y, X, &I);
      return BinaryOperator::CreateFAddFMF(Op0, NewSub, &I);
    }
  }

  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *NV = FoldOpIntoSelect(I, SI))
        return NV;

  // X - C --> X + (-C)
  // But don't transform constant expressions because there's an inverse fold
  // for X + (-Y) --> X - Y.
  if (match(Op1, m_Constant(C)) && !isa<ConstantExpr>(Op1))
    return BinaryOperator::CreateFAddFMF(Op0, ConstantExpr::getFNeg(C), &I);

  // X - (-Y) --> X + Y
  if (match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateFAddFMF(Op0, Y, &I);

  // Similar to above, but look through a cast of the negated value:
  // X - (fptrunc(-Y)) --> X + fptrunc(Y)
  Type *Ty = I.getType();
  if (match(Op1, m_OneUse(m_FPTrunc(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPTrunc(Y, Ty), &I);

  // X - (fpext(-Y)) --> X + fpext(Y)
  if (match(Op1, m_OneUse(m_FPExt(m_FNeg(m_Value(Y))))))
    return BinaryOperator::CreateFAddFMF(Op0, Builder.CreateFPExt(Y, Ty), &I);

  // Handle special cases for FSub with selects feeding the operation.
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc() && I.hasNoSignedZeros()) {
    // (Y - X) - Y --> -X
    if (match(Op0, m_FSub(m_Specific(Op1), m_Value(X))))
      return BinaryOperator::CreateFNegFMF(X, &I);

    // Y - (X + Y) --> -X
    // Y - (Y + X) --> -X
    if (match(Op1, m_c_FAdd(m_Specific(Op0), m_Value(X))))
      return BinaryOperator::CreateFNegFMF(X, &I);

    // (X * C) - X --> X * (C - 1.0)
    if (match(Op0, m_FMul(m_Specific(Op1), m_Constant(C)))) {
      Constant *CSubOne = ConstantExpr::getFSub(C, ConstantFP::get(Ty, 1.0));
      return BinaryOperator::CreateFMulFMF(Op1, CSubOne, &I);
    }
    // X - (X * C) --> X * (1.0 - C)
    if (match(Op1, m_FMul(m_Specific(Op0), m_Constant(C)))) {
      Constant *OneSubC = ConstantExpr::getFSub(ConstantFP::get(Ty, 1.0), C);
      return BinaryOperator::CreateFMulFMF(Op0, OneSubC, &I);
    }

    if (Instruction *F = factorizeFAddFSub(I, Builder))
      return F;

    // TODO: This performs reassociative folds for FP ops. Some fraction of the
    // functionality has been subsumed by simple pattern matching here and in
    // InstSimplify. We should let a dedicated reassociation pass handle more
    // complex pattern matching and remove this from InstCombine.
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}