//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: This is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand,  "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

namespace {
struct Query {
  const DataLayout &DL;
  const TargetLibraryInfo *TLI;
  const DominatorTree *DT;
  AssumptionCache *AC;
  const Instruction *CxtI;

  Query(const DataLayout &DL, const TargetLibraryInfo *tli,
        const DominatorTree *dt, AssumptionCache *ac = nullptr,
        const Instruction *cxti = nullptr)
      : DL(DL), TLI(tli), DT(dt), AC(ac), CxtI(cxti) {}
};
} // end anonymous namespace

static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const Query &,
                            unsigned);
static Value *SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                              const Query &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const Query &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const Query &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
                               const Query &, unsigned);

/// For a boolean type, or a vector of boolean type, return false, or
/// a vector with every element false, as appropriate for the type.
static Constant *getFalse(Type *Ty) {
  assert(Ty->getScalarType()->isIntegerTy(1) &&
         "Expected i1 type or a vector of i1!");
  return Constant::getNullValue(Ty);
}

/// For a boolean type, or a vector of boolean type, return true, or
/// a vector with every element true, as appropriate for the type.
static Constant *getTrue(Type *Ty) {
  assert(Ty->getScalarType()->isIntegerTy(1) &&
         "Expected i1 type or a vector of i1!");
  return Constant::getAllOnesValue(Ty);
}

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}

/// Does the given value dominate the specified phi node?
static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getParent()->getParent())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT) {
    if (!DT->isReachableFromEntry(P->getParent()))
      return true;
    if (!DT->isReachableFromEntry(I->getParent()))
      return false;
    return DT->dominates(I, P);
  }

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent() == &I->getParent()->getParent()->getEntryBlock() &&
      !isa<InvokeInst>(I))
    return true;

  return false;
}

/// Simplify "A op (B op' C)" by distributing op over op', turning it into
/// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
/// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
/// Returns the simplified value, or null if no simplification was performed.
static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                          unsigned OpcToExpand, const Query &Q,
                          unsigned MaxRecurse) {
  Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Check whether the expression has the form "(A op' B) op C".
  if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
    if (Op0->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op C) op' (B op C)".
      Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
      // Do "A op C" and "B op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
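          // For example, with op == And, op' == Or and C == -1: "A & -1"
          // simplifies to A and "B & -1" simplifies to B, so the expanded form
          // folds straight back to the original "A | B".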
          if ((L == A && R == B) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == B && R == A)) {
            ++NumExpand;
            return LHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  // Check whether the expression has the form "A op (B op' C)".
  if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
    if (Op1->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op B) op' (A op C)".
      Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
      // Do "A op B" and "A op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
          if ((L == B && R == C) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == C && R == B)) {
            ++NumExpand;
            return RHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  return nullptr;
}

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
                                       const Query &Q, unsigned MaxRecurse) {
  Instruction::BinaryOps Opcode = (Instruction::BinaryOps)Opc;
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B) return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B) return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
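  // For example, "(X + Y) + -X": "-X + X" simplifies to 0, and "0 + Y"
  // simplifies to Y, so the whole expression folds to Y.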
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A) return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C) return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
                                    const Query &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && isa<UndefValue>(TV))
    return FV;
  if (FV && isa<UndefValue>(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == Opcode) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const Query &Q,
                                  unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
  if (TCmp == Cond) {
    // It not only simplified, it simplified to the select condition. Replace
    // it with 'true'.
    TCmp = getTrue(Cond->getType());
  } else if (!TCmp) {
    // It didn't simplify. However if "cmp TV, RHS" is equal to the select
    // condition then we can replace it with 'true'. Otherwise give up.
    if (!isSameCompare(Cond, Pred, TV, RHS))
      return nullptr;
    TCmp = getTrue(Cond->getType());
  }

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse);
  if (FCmp == Cond) {
    // It not only simplified, it simplified to the select condition. Replace
    // it with 'false'.
    FCmp = getFalse(Cond->getType());
  } else if (!FCmp) {
    // It didn't simplify. However if "cmp FV, RHS" is equal to the select
    // condition then we can replace it with 'false'. Otherwise give up.
    if (!isSameCompare(Cond, Pred, FV, RHS))
      return nullptr;
    FCmp = getFalse(Cond->getType());
  }

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
    return nullptr;
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
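  // Note that "Cond && TCmp" (and the "or"/"xor" forms below) are only used
  // when they themselves simplify; no new instructions are created here.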
  if (match(FCmp, m_Zero()))
    if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()))
    if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V =
            SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
                            Q, MaxRecurse))
      return V;

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
                                 const Query &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!ValueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!ValueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = PI == LHS ?
      SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
      SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a value different
    // from the previous one, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const Query &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!ValueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the compare on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a value different
    // from the previous one, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const Query &Q, unsigned MaxRecurse) {
  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
    if (Constant *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Instruction::Add, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS.
    std::swap(Op0, Op1);
  }

  // X + undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Op0->getType());

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntegerTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const DataLayout &DL, const TargetLibraryInfo *TLI,
                             const DominatorTree *DT, AssumptionCache *AC,
                             const Instruction *CxtI) {
  return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
                           RecursionLimit);
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
///
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
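///
/// For example, given "getelementptr inbounds i32, i32* %base, i64 2", V is
/// rewritten to %base and the returned offset is 8 (assuming a 4-byte i32).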
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                bool AllowNonInbounds = false) {
  assert(V->getType()->getScalarType()->isPointerTy());

  Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
  APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if ((!AllowNonInbounds && !GEP->isInBounds()) ||
          !GEP->accumulateConstantOffset(DL, Offset))
        break;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        break;
      V = GA->getAliasee();
    } else {
      if (auto CS = CallSite(V))
        if (Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }
      break;
    }
    assert(V->getType()->getScalarType()->isPointerTy() &&
           "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
  if (V->getType()->isVectorTy())
    return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
                                    OffsetIntPtr);
  return OffsetIntPtr;
}

/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  return ConstantExpr::getSub(LHSOffset, RHSOffset);
}

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const Query &Q, unsigned MaxRecurse) {
  if (Constant *CLHS = dyn_cast<Constant>(Op0))
    if (Constant *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Instruction::Sub, CLHS, CRHS, Q.DL);

  // X - undef -> undef
  // undef - X -> undef
  if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (isNUW)
      return Op0;

    unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (KnownZero == ~APInt::getSignBit(BitWidth)) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (isNSW)
        return Op0;

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) &&
      match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntegerTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const DataLayout &DL, const TargetLibraryInfo *TLI,
                             const DominatorTree *DT, AssumptionCache *AC,
                             const Instruction *CxtI) {
  return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
                           RecursionLimit);
}

/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const Query &Q, unsigned MaxRecurse) {
  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
    if (Constant *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Instruction::FAdd, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS.
    std::swap(Op0, Op1);
  }

  // fadd X, -0 ==> X
  if (match(Op1, m_NegZero()))
    return Op0;

  // fadd X, 0 ==> X, when we know X is not -0
  if (match(Op1, m_Zero()) &&
      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
    return Op0;

  // fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
  //   where nnan and ninf have to occur at least once somewhere in this
  //   expression
  Value *SubOp = nullptr;
  if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
    SubOp = Op1;
  else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
    SubOp = Op0;
  if (SubOp) {
    Instruction *FSub = cast<Instruction>(SubOp);
    if ((FMF.noNaNs() || FSub->hasNoNaNs()) &&
        (FMF.noInfs() || FSub->hasNoInfs()))
      return Constant::getNullValue(Op0->getType());
  }

  return nullptr;
}

/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const Query &Q, unsigned MaxRecurse) {
  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
    if (Constant *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Instruction::FSub, CLHS, CRHS, Q.DL);
  }

  // fsub X, 0 ==> X
  if (match(Op1, m_Zero()))
    return Op0;

  // fsub X, -0 ==> X, when we know X is not -0
  if (match(Op1, m_NegZero()) &&
      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
    return Op0;

  // fsub -0.0, (fsub -0.0, X) ==> X
  Value *X;
  if (match(Op0, m_NegZero()) && match(Op1, m_FSub(m_NegZero(), m_Value(X))))
    return X;

  // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
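  // (Same pattern as the "-0.0" case above, but with nsz any zero is accepted.)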
  if (FMF.noSignedZeros() && match(Op0, m_AnyZero()) &&
      match(Op1, m_FSub(m_AnyZero(), m_Value(X))))
    return X;

  // fsub nnan x, x ==> 0.0
  if (FMF.noNaNs() && Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  return nullptr;
}

/// Given the operands for an FMul, see if we can fold the result.
static Value *SimplifyFMulInst(Value *Op0, Value *Op1,
                               FastMathFlags FMF,
                               const Query &Q,
                               unsigned MaxRecurse) {
  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
    if (Constant *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Instruction::FMul, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS.
    std::swap(Op0, Op1);
  }

  // fmul X, 1.0 ==> X
  if (match(Op1, m_FPOne()))
    return Op0;

  // fmul nnan nsz X, 0 ==> 0
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
    return Op1;

  return nullptr;
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
                              unsigned MaxRecurse) {
  if (Constant *CLHS = dyn_cast<Constant>(Op0)) {
    if (Constant *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Instruction::Mul, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS.
    std::swap(Op0, Op1);
  }

  // X * undef -> 0
  if (match(Op1, m_Undef()))
    return Constant::getNullValue(Op0->getType());

  // X * 0 -> 0
  if (match(Op1, m_Zero()))
    return Op1;

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
      match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))   // Y * (X / Y)
    return X;

  // i1 mul -> and.
  if (MaxRecurse && Op0->getType()->isIntegerTy(1))
    if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
                             Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
                                         MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
                                      MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifyFAddInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifyFSubInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifyFMulInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout &DL,
                             const TargetLibraryInfo *TLI,
                             const DominatorTree *DT, AssumptionCache *AC,
                             const Instruction *CxtI) {
  return ::SimplifyMulInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
                           RecursionLimit);
}

/// Given operands for an SDiv or UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const Query &Q, unsigned MaxRecurse) {
  if (Constant *C0 = dyn_cast<Constant>(Op0))
    if (Constant *C1 = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, C0, C1, Q.DL);

  bool isSigned = Opcode == Instruction::SDiv;

  // X / undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X / 0 -> undef, we don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return UndefValue::get(Op1->getType());

  // undef / X -> 0
  if (match(Op0, m_Undef()))
    return Constant::getNullValue(Op0->getType());

  // 0 / X -> 0, we don't need to preserve faults!
  if (match(Op0, m_Zero()))
    return Op0;

  // X / 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  if (Op0->getType()->isIntegerTy(1))
    // It can't be division by zero, hence it must be division by one.
    return Op0;

  // X / X -> 1
  if (Op0 == Op1)
    return ConstantInt::get(Op0->getType(), 1);

  // (X * Y) / Y -> X if the multiplication does not overflow.
  Value *X = nullptr, *Y = nullptr;
  if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
    if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
    OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
    // If the Mul knows it does not overflow, then we are good to go.
    if ((isSigned && Mul->hasNoSignedWrap()) ||
        (!isSigned && Mul->hasNoUnsignedWrap()))
      return X;
    // If X has the form X = A / Y then X * Y cannot overflow.
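    // Rationale: (A / Y) * Y has magnitude no larger than A, so it cannot wrap.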
    if (BinaryOperator *Div = dyn_cast<BinaryOperator>(X))
      if (Div->getOpcode() == Opcode && Div->getOperand(1) == Y)
        return X;
  }

  // (X rem Y) / Y -> 0
  if ((isSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (!isSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Constant::getNullValue(Op0->getType());

  // (X /u C1) /u C2 -> 0 if C1 * C2 overflows
  ConstantInt *C1, *C2;
  if (!isSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
      match(Op1, m_ConstantInt(C2))) {
    bool Overflow;
    C1->getValue().umul_ov(C2->getValue(), Overflow);
    if (Overflow)
      return Constant::getNullValue(Op0->getType());
  }

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
                               unsigned MaxRecurse) {
  if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifySDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
                               unsigned MaxRecurse) {
  if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifyUDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const Query &Q, unsigned) {
  // undef / X -> undef (the undef could be a snan).
  if (match(Op0, m_Undef()))
    return Op0;

  // X / undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // 0 / X -> 0
  // Requires that NaNs are off (X could be zero) and signed zeroes are
  // ignored (X could be positive or negative, so the output sign is unknown).
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
    return Op0;

  if (FMF.noNaNs()) {
    // X / X -> 1.0 is legal when NaNs are ignored.
    if (Op0 == Op1)
      return ConstantFP::get(Op0->getType(), 1.0);

    // -X / X -> -1.0 and
    //  X / -X -> -1.0 are legal when NaNs are ignored.
    // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
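    // For example, "fdiv nnan %x, (fsub -0.0, %x)" folds to -1.0.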
    if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
         BinaryOperator::getFNegArgument(Op0) == Op1) ||
        (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
         BinaryOperator::getFNegArgument(Op1) == Op0))
      return ConstantFP::get(Op0->getType(), -1.0);
  }

  return nullptr;
}

Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifyFDivInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

/// Given operands for an SRem or URem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const Query &Q, unsigned MaxRecurse) {
  if (Constant *C0 = dyn_cast<Constant>(Op0))
    if (Constant *C1 = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, C0, C1, Q.DL);

  // X % undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // undef % X -> 0
  if (match(Op0, m_Undef()))
    return Constant::getNullValue(Op0->getType());

  // 0 % X -> 0, we don't need to preserve faults!
  if (match(Op0, m_Zero()))
    return Op0;

  // X % 0 -> undef, we don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return UndefValue::get(Op0->getType());

  // X % 1 -> 0
  if (match(Op1, m_One()))
    return Constant::getNullValue(Op0->getType());

  if (Op0->getType()->isIntegerTy(1))
    // It can't be remainder by zero, hence it must be remainder by one.
    return Constant::getNullValue(Op0->getType());

  // X % X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // (X % Y) % Y -> X % Y
  if ((Opcode == Instruction::SRem &&
       match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (Opcode == Instruction::URem &&
       match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Op0;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
                               unsigned MaxRecurse) {
  if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifySRemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
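/// For example, "urem i32 %X, 1" and "urem i32 %X, %X" both fold to 0.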
static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
                               unsigned MaxRecurse) {
  if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifyURemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const Query &, unsigned) {
  // undef % X -> undef (the undef could be a snan).
  if (match(Op0, m_Undef()))
    return Op0;

  // X % undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // 0 % X -> 0
  // Requires that NaNs are off (X could be zero) and signed zeroes are
  // ignored (X could be positive or negative, so the output sign is unknown).
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
    return Op0;

  return nullptr;
}

Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifyFRemInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields undef.
static bool isUndefShift(Value *Amount) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> undef because it may shift by the bitwidth.
  if (isa<UndefValue>(C))
    return true;

  // Shifting by the bitwidth or more is undefined.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
    if (CI->getValue().getLimitedValue() >=
        CI->getType()->getScalarSizeInBits())
      return true;

  // If all lanes of a vector shift are undefined the whole shift is.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
      if (!isUndefShift(C->getAggregateElement(I)))
        return false;
    return true;
  }

  return false;
}

/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
                            const Query &Q, unsigned MaxRecurse) {
  if (Constant *C0 = dyn_cast<Constant>(Op0))
    if (Constant *C1 = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, C0, C1, Q.DL);

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Op0;

  // X shift by 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // Fold undefined shifts.
  if (isUndefShift(Op1))
    return UndefValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
  APInt KnownZero(BitWidth, 0);
  APInt KnownOne(BitWidth, 0);
  computeKnownBits(Op1, KnownZero, KnownOne, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (KnownOne.getLimitedValue() >= BitWidth)
    return UndefValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(BitWidth);
  APInt ShiftAmountMask = APInt::getLowBitsSet(BitWidth, NumValidShiftBits);
  if ((KnownZero & ShiftAmountMask) == ShiftAmountMask)
    return Op0;

  return nullptr;
}

/// \brief Given operands for an LShr or AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyRightShift(unsigned Opcode, Value *Op0, Value *Op1,
                                 bool isExact, const Query &Q,
                                 unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (match(Op0, m_Undef()))
    return isExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  if (isExact) {
    unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
    APInt Op0KnownZero(BitWidth, 0);
    APInt Op0KnownOne(BitWidth, 0);
    computeKnownBits(Op0, Op0KnownZero, Op0KnownOne, Q.DL, /*Depth=*/0, Q.AC,
                     Q.CxtI, Q.DT);
    if (Op0KnownOne[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const Query &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
    return V;

  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (match(Op0, m_Undef()))
    return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());

  // (X >> A) << A -> X
  Value *X;
  if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;
  return nullptr;
}

Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const DataLayout &DL, const TargetLibraryInfo *TLI,
                             const DominatorTree *DT, AssumptionCache *AC,
                             const Instruction *CxtI) {
  return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
                           RecursionLimit);
}

/// Given operands for an LShr, see if we can fold the result.
/// If not, this returns null.
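/// For example, "(X << A) >> A" folds back to X when the shl has the nuw flag,
/// since no bits can have been shifted out.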
static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                               const Query &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // (X << A) >> A -> X
  Value *X;
  if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
    return X;

  return nullptr;
}

Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                              const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifyLShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

/// Given operands for an AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                               const Query &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // all ones >>a X -> all ones
  if (match(Op0, m_AllOnes()))
    return Op0;

  // (X << A) >> A -> X
  Value *X;
  if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return Op0;

  return nullptr;
}

Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                              const DataLayout &DL,
                              const TargetLibraryInfo *TLI,
                              const DominatorTree *DT, AssumptionCache *AC,
                              const Instruction *CxtI) {
  return ::SimplifyAShrInst(Op0, Op1, isExact, Query(DL, TLI, DT, AC, CxtI),
                            RecursionLimit);
}

static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd) {
  Value *X, *Y;

  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  ICmpInst::Predicate UnsignedPred;
  if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
      ICmpInst::isUnsigned(UnsignedPred))
    ;
  else if (match(UnsignedICmp,
                 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
           ICmpInst::isUnsigned(UnsignedPred))
    UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
  else
    return nullptr;

  // X < Y && Y != 0  -->  X < Y
  // X < Y || Y != 0  -->  Y != 0
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
    return IsAnd ? UnsignedICmp : ZeroICmp;

  // X >= Y || Y != 0  -->  true
  // X >= Y || Y == 0  -->  X >= Y
  if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) {
    if (EqPred == ICmpInst::ICMP_NE)
      return getTrue(UnsignedICmp->getType());
    return UnsignedICmp;
  }

  // X < Y && Y == 0  -->  false
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
      IsAnd)
    return getFalse(UnsignedICmp->getType());

  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
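///
/// For example, "(icmp sgt A, B) & (icmp sge A, B)" folds to the sgt compare,
/// and "(icmp eq A, B) & (icmp ne A, B)" folds to false.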
1523 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1524 ICmpInst::Predicate Pred0, Pred1;
1525 Value *A, *B;
1526 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1527 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1528 return nullptr;
1529
1530 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1531 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1532 // can eliminate Op1 from this 'and'.
1533 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1534 return Op0;
1535
1536 // Check for any combination of predicates that are guaranteed to be disjoint.
1537 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1538 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1539 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1540 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1541 return getFalse(Op0->getType());
1542
1543 return nullptr;
1544 }
1545
1546 /// Commuted variants are assumed to be handled by calling this function again
1547 /// with the parameters swapped.
1548 static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
1549 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
1550 return X;
1551
1552 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
1553 return X;
1554
1555 // Look for this pattern: (icmp V, C0) & (icmp V, C1).
1556 Type *ITy = Op0->getType();
1557 ICmpInst::Predicate Pred0, Pred1;
1558 const APInt *C0, *C1;
1559 Value *V;
1560 if (match(Op0, m_ICmp(Pred0, m_Value(V), m_APInt(C0))) &&
1561 match(Op1, m_ICmp(Pred1, m_Specific(V), m_APInt(C1)))) {
1562 // Make a constant range that's the intersection of the two icmp ranges.
1563 // If the intersection is empty, we know that the result is false.
1564 auto Range0 = ConstantRange::makeAllowedICmpRegion(Pred0, *C0);
1565 auto Range1 = ConstantRange::makeAllowedICmpRegion(Pred1, *C1);
1566 if (Range0.intersectWith(Range1).isEmptySet())
1567 return getFalse(ITy);
1568 }
1569
1570 // (icmp (add V, C0), C1) & (icmp V, C0)
1571 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1572 return nullptr;
1573
1574 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1575 return nullptr;
1576
1577 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1578 if (AddInst->getOperand(1) != Op1->getOperand(1))
1579 return nullptr;
1580
1581 bool isNSW = AddInst->hasNoSignedWrap();
1582 bool isNUW = AddInst->hasNoUnsignedWrap();
1583
1584 const APInt Delta = *C1 - *C0;
1585 if (C0->isStrictlyPositive()) {
1586 if (Delta == 2) {
1587 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1588 return getFalse(ITy);
1589 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1590 return getFalse(ITy);
1591 }
1592 if (Delta == 1) {
1593 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1594 return getFalse(ITy);
1595 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1596 return getFalse(ITy);
1597 }
1598 }
1599 if (C0->getBoolValue() && isNUW) {
1600 if (Delta == 2)
1601 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1602 return getFalse(ITy);
1603 if (Delta == 1)
1604 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1605 return getFalse(ITy);
1606 }
1607
1608 return nullptr;
1609 }
1610
1611 /// Given operands for an And, see if we can fold the result.
1612 /// If not, this returns null.
1613 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q, 1614 unsigned MaxRecurse) { 1615 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 1616 if (Constant *CRHS = dyn_cast<Constant>(Op1)) 1617 return ConstantFoldBinaryOpOperands(Instruction::And, CLHS, CRHS, Q.DL); 1618 1619 // Canonicalize the constant to the RHS. 1620 std::swap(Op0, Op1); 1621 } 1622 1623 // X & undef -> 0 1624 if (match(Op1, m_Undef())) 1625 return Constant::getNullValue(Op0->getType()); 1626 1627 // X & X = X 1628 if (Op0 == Op1) 1629 return Op0; 1630 1631 // X & 0 = 0 1632 if (match(Op1, m_Zero())) 1633 return Op1; 1634 1635 // X & -1 = X 1636 if (match(Op1, m_AllOnes())) 1637 return Op0; 1638 1639 // A & ~A = ~A & A = 0 1640 if (match(Op0, m_Not(m_Specific(Op1))) || 1641 match(Op1, m_Not(m_Specific(Op0)))) 1642 return Constant::getNullValue(Op0->getType()); 1643 1644 // (A | ?) & A = A 1645 Value *A = nullptr, *B = nullptr; 1646 if (match(Op0, m_Or(m_Value(A), m_Value(B))) && 1647 (A == Op1 || B == Op1)) 1648 return Op1; 1649 1650 // A & (A | ?) = A 1651 if (match(Op1, m_Or(m_Value(A), m_Value(B))) && 1652 (A == Op0 || B == Op0)) 1653 return Op0; 1654 1655 // A & (-A) = A if A is a power of two or zero. 1656 if (match(Op0, m_Neg(m_Specific(Op1))) || 1657 match(Op1, m_Neg(m_Specific(Op0)))) { 1658 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 1659 Q.DT)) 1660 return Op0; 1661 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 1662 Q.DT)) 1663 return Op1; 1664 } 1665 1666 if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) { 1667 if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) { 1668 if (Value *V = SimplifyAndOfICmps(ICILHS, ICIRHS)) 1669 return V; 1670 if (Value *V = SimplifyAndOfICmps(ICIRHS, ICILHS)) 1671 return V; 1672 } 1673 } 1674 1675 // The compares may be hidden behind casts. Look through those and try the 1676 // same folds as above. 1677 auto *Cast0 = dyn_cast<CastInst>(Op0); 1678 auto *Cast1 = dyn_cast<CastInst>(Op1); 1679 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() && 1680 Cast0->getSrcTy() == Cast1->getSrcTy()) { 1681 auto *Cmp0 = dyn_cast<ICmpInst>(Cast0->getOperand(0)); 1682 auto *Cmp1 = dyn_cast<ICmpInst>(Cast1->getOperand(0)); 1683 if (Cmp0 && Cmp1) { 1684 Instruction::CastOps CastOpc = Cast0->getOpcode(); 1685 Type *ResultType = Cast0->getType(); 1686 if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp0, Cmp1))) 1687 return ConstantExpr::getCast(CastOpc, V, ResultType); 1688 if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp1, Cmp0))) 1689 return ConstantExpr::getCast(CastOpc, V, ResultType); 1690 } 1691 } 1692 1693 // Try some generic simplifications for associative operations. 1694 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, 1695 MaxRecurse)) 1696 return V; 1697 1698 // And distributes over Or. Try some generic simplifications based on this. 1699 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or, 1700 Q, MaxRecurse)) 1701 return V; 1702 1703 // And distributes over Xor. Try some generic simplifications based on this. 1704 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor, 1705 Q, MaxRecurse)) 1706 return V; 1707 1708 // If the operation is with the result of a select instruction, check whether 1709 // operating on either branch of the select always yields the same value. 
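// For example, in "%x & (select i1 %c, i32 -1, i32 %x)" both arms fold to %x
// ("%x & -1" and "%x & %x"), so the whole 'and' simplifies to %x regardless
// of %c.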
1710 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1711 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q, 1712 MaxRecurse)) 1713 return V; 1714 1715 // If the operation is with the result of a phi instruction, check whether 1716 // operating on all incoming values of the phi always yields the same value. 1717 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1718 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q, 1719 MaxRecurse)) 1720 return V; 1721 1722 return nullptr; 1723 } 1724 1725 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout &DL, 1726 const TargetLibraryInfo *TLI, 1727 const DominatorTree *DT, AssumptionCache *AC, 1728 const Instruction *CxtI) { 1729 return ::SimplifyAndInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI), 1730 RecursionLimit); 1731 } 1732 1733 /// Commuted variants are assumed to be handled by calling this function again 1734 /// with the parameters swapped. 1735 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { 1736 ICmpInst::Predicate Pred0, Pred1; 1737 Value *A ,*B; 1738 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || 1739 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) 1740 return nullptr; 1741 1742 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B). 1743 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we 1744 // can eliminate Op0 from this 'or'. 1745 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) 1746 return Op1; 1747 1748 // Check for any combination of predicates that cover the entire range of 1749 // possibilities. 1750 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || 1751 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) || 1752 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) || 1753 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE)) 1754 return getTrue(Op0->getType()); 1755 1756 return nullptr; 1757 } 1758 1759 /// Commuted variants are assumed to be handled by calling this function again 1760 /// with the parameters swapped. 
1761 static Value *SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) { 1762 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false)) 1763 return X; 1764 1765 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1)) 1766 return X; 1767 1768 // (icmp (add V, C0), C1) | (icmp V, C0) 1769 ICmpInst::Predicate Pred0, Pred1; 1770 const APInt *C0, *C1; 1771 Value *V; 1772 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1773 return nullptr; 1774 1775 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1776 return nullptr; 1777 1778 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); 1779 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1780 return nullptr; 1781 1782 Type *ITy = Op0->getType(); 1783 bool isNSW = AddInst->hasNoSignedWrap(); 1784 bool isNUW = AddInst->hasNoUnsignedWrap(); 1785 1786 const APInt Delta = *C1 - *C0; 1787 if (C0->isStrictlyPositive()) { 1788 if (Delta == 2) { 1789 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE) 1790 return getTrue(ITy); 1791 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1792 return getTrue(ITy); 1793 } 1794 if (Delta == 1) { 1795 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE) 1796 return getTrue(ITy); 1797 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1798 return getTrue(ITy); 1799 } 1800 } 1801 if (C0->getBoolValue() && isNUW) { 1802 if (Delta == 2) 1803 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE) 1804 return getTrue(ITy); 1805 if (Delta == 1) 1806 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE) 1807 return getTrue(ITy); 1808 } 1809 1810 return nullptr; 1811 } 1812 1813 /// Given operands for an Or, see if we can fold the result. 1814 /// If not, this returns null. 1815 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q, 1816 unsigned MaxRecurse) { 1817 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 1818 if (Constant *CRHS = dyn_cast<Constant>(Op1)) 1819 return ConstantFoldBinaryOpOperands(Instruction::Or, CLHS, CRHS, Q.DL); 1820 1821 // Canonicalize the constant to the RHS. 1822 std::swap(Op0, Op1); 1823 } 1824 1825 // X | undef -> -1 1826 if (match(Op1, m_Undef())) 1827 return Constant::getAllOnesValue(Op0->getType()); 1828 1829 // X | X = X 1830 if (Op0 == Op1) 1831 return Op0; 1832 1833 // X | 0 = X 1834 if (match(Op1, m_Zero())) 1835 return Op0; 1836 1837 // X | -1 = -1 1838 if (match(Op1, m_AllOnes())) 1839 return Op1; 1840 1841 // A | ~A = ~A | A = -1 1842 if (match(Op0, m_Not(m_Specific(Op1))) || 1843 match(Op1, m_Not(m_Specific(Op0)))) 1844 return Constant::getAllOnesValue(Op0->getType()); 1845 1846 // (A & ?) | A = A 1847 Value *A = nullptr, *B = nullptr; 1848 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 1849 (A == Op1 || B == Op1)) 1850 return Op1; 1851 1852 // A | (A & ?) = A 1853 if (match(Op1, m_And(m_Value(A), m_Value(B))) && 1854 (A == Op0 || B == Op0)) 1855 return Op0; 1856 1857 // ~(A & ?) | A = -1 1858 if (match(Op0, m_Not(m_And(m_Value(A), m_Value(B)))) && 1859 (A == Op1 || B == Op1)) 1860 return Constant::getAllOnesValue(Op1->getType()); 1861 1862 // A | ~(A & ?) 
= -1 1863 if (match(Op1, m_Not(m_And(m_Value(A), m_Value(B)))) && 1864 (A == Op0 || B == Op0)) 1865 return Constant::getAllOnesValue(Op0->getType()); 1866 1867 if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) { 1868 if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) { 1869 if (Value *V = SimplifyOrOfICmps(ICILHS, ICIRHS)) 1870 return V; 1871 if (Value *V = SimplifyOrOfICmps(ICIRHS, ICILHS)) 1872 return V; 1873 } 1874 } 1875 1876 // Try some generic simplifications for associative operations. 1877 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, 1878 MaxRecurse)) 1879 return V; 1880 1881 // Or distributes over And. Try some generic simplifications based on this. 1882 if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q, 1883 MaxRecurse)) 1884 return V; 1885 1886 // If the operation is with the result of a select instruction, check whether 1887 // operating on either branch of the select always yields the same value. 1888 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1889 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, 1890 MaxRecurse)) 1891 return V; 1892 1893 // (A & C)|(B & D) 1894 Value *C = nullptr, *D = nullptr; 1895 if (match(Op0, m_And(m_Value(A), m_Value(C))) && 1896 match(Op1, m_And(m_Value(B), m_Value(D)))) { 1897 ConstantInt *C1 = dyn_cast<ConstantInt>(C); 1898 ConstantInt *C2 = dyn_cast<ConstantInt>(D); 1899 if (C1 && C2 && (C1->getValue() == ~C2->getValue())) { 1900 // (A & C1)|(B & C2) 1901 // If we have: ((V + N) & C1) | (V & C2) 1902 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0 1903 // replace with V+N. 1904 Value *V1, *V2; 1905 if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+ 1906 match(A, m_Add(m_Value(V1), m_Value(V2)))) { 1907 // Add commutes, try both ways. 1908 if (V1 == B && 1909 MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 1910 return A; 1911 if (V2 == B && 1912 MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 1913 return A; 1914 } 1915 // Or commutes, try both ways. 1916 if ((C1->getValue() & (C1->getValue() + 1)) == 0 && 1917 match(B, m_Add(m_Value(V1), m_Value(V2)))) { 1918 // Add commutes, try both ways. 1919 if (V1 == A && 1920 MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 1921 return B; 1922 if (V2 == A && 1923 MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 1924 return B; 1925 } 1926 } 1927 } 1928 1929 // If the operation is with the result of a phi instruction, check whether 1930 // operating on all incoming values of the phi always yields the same value. 1931 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1932 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse)) 1933 return V; 1934 1935 return nullptr; 1936 } 1937 1938 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout &DL, 1939 const TargetLibraryInfo *TLI, 1940 const DominatorTree *DT, AssumptionCache *AC, 1941 const Instruction *CxtI) { 1942 return ::SimplifyOrInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI), 1943 RecursionLimit); 1944 } 1945 1946 /// Given operands for a Xor, see if we can fold the result. 1947 /// If not, this returns null. 1948 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q, 1949 unsigned MaxRecurse) { 1950 if (Constant *CLHS = dyn_cast<Constant>(Op0)) { 1951 if (Constant *CRHS = dyn_cast<Constant>(Op1)) 1952 return ConstantFoldBinaryOpOperands(Instruction::Xor, CLHS, CRHS, Q.DL); 1953 1954 // Canonicalize the constant to the RHS. 
1955 std::swap(Op0, Op1); 1956 } 1957 1958 // A ^ undef -> undef 1959 if (match(Op1, m_Undef())) 1960 return Op1; 1961 1962 // A ^ 0 = A 1963 if (match(Op1, m_Zero())) 1964 return Op0; 1965 1966 // A ^ A = 0 1967 if (Op0 == Op1) 1968 return Constant::getNullValue(Op0->getType()); 1969 1970 // A ^ ~A = ~A ^ A = -1 1971 if (match(Op0, m_Not(m_Specific(Op1))) || 1972 match(Op1, m_Not(m_Specific(Op0)))) 1973 return Constant::getAllOnesValue(Op0->getType()); 1974 1975 // Try some generic simplifications for associative operations. 1976 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, 1977 MaxRecurse)) 1978 return V; 1979 1980 // Threading Xor over selects and phi nodes is pointless, so don't bother. 1981 // Threading over the select in "A ^ select(cond, B, C)" means evaluating 1982 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and 1983 // only if B and C are equal. If B and C are equal then (since we assume 1984 // that operands have already been simplified) "select(cond, B, C)" should 1985 // have been simplified to the common value of B and C already. Analysing 1986 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly 1987 // for threading over phi nodes. 1988 1989 return nullptr; 1990 } 1991 1992 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout &DL, 1993 const TargetLibraryInfo *TLI, 1994 const DominatorTree *DT, AssumptionCache *AC, 1995 const Instruction *CxtI) { 1996 return ::SimplifyXorInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI), 1997 RecursionLimit); 1998 } 1999 2000 static Type *GetCompareTy(Value *Op) { 2001 return CmpInst::makeCmpResultType(Op->getType()); 2002 } 2003 2004 /// Rummage around inside V looking for something equivalent to the comparison 2005 /// "LHS Pred RHS". Return such a value if found, otherwise return null. 2006 /// Helper function for analyzing max/min idioms. 2007 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, 2008 Value *LHS, Value *RHS) { 2009 SelectInst *SI = dyn_cast<SelectInst>(V); 2010 if (!SI) 2011 return nullptr; 2012 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 2013 if (!Cmp) 2014 return nullptr; 2015 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1); 2016 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS) 2017 return Cmp; 2018 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) && 2019 LHS == CmpRHS && RHS == CmpLHS) 2020 return Cmp; 2021 return nullptr; 2022 } 2023 2024 // A significant optimization not implemented here is assuming that alloca 2025 // addresses are not equal to incoming argument values. They don't *alias*, 2026 // as we say, but that doesn't mean they aren't equal, so we take a 2027 // conservative approach. 2028 // 2029 // This is inspired in part by C++11 5.10p1: 2030 // "Two pointers of the same type compare equal if and only if they are both 2031 // null, both point to the same function, or both represent the same 2032 // address." 2033 // 2034 // This is pretty permissive. 
2035 //
2036 // It's also partly due to C11 6.5.9p6:
2037 // "Two pointers compare equal if and only if both are null pointers, both are
2038 // pointers to the same object (including a pointer to an object and a
2039 // subobject at its beginning) or function, both are pointers to one past the
2040 // last element of the same array object, or one is a pointer to one past the
2041 // end of one array object and the other is a pointer to the start of a
2042 // different array object that happens to immediately follow the first array
2043 // object in the address space."
2044 //
2045 // C11's version is more restrictive; however, there's no reason why an argument
2046 // couldn't be a one-past-the-end value for a stack object in the caller and be
2047 // equal to the beginning of a stack object in the callee.
2048 //
2049 // If the C and C++ standards are ever made sufficiently restrictive in this
2050 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2051 // this optimization.
2052 static Constant *
2053 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2054 const DominatorTree *DT, CmpInst::Predicate Pred,
2055 const Instruction *CxtI, Value *LHS, Value *RHS) {
2056 // First, skip past any trivial no-ops.
2057 LHS = LHS->stripPointerCasts();
2058 RHS = RHS->stripPointerCasts();
2059
2060 // A non-null pointer is not equal to a null pointer.
2061 if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
2062 (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
2063 return ConstantInt::get(GetCompareTy(LHS),
2064 !CmpInst::isTrueWhenEqual(Pred));
2065
2066 // We can only fold certain predicates on pointer comparisons.
2067 switch (Pred) {
2068 default:
2069 return nullptr;
2070
2071 // Equality comparisons are easy to fold.
2072 case CmpInst::ICMP_EQ:
2073 case CmpInst::ICMP_NE:
2074 break;
2075
2076 // We can only handle unsigned relational comparisons because 'inbounds' on
2077 // a GEP only protects against unsigned wrapping.
2078 case CmpInst::ICMP_UGT:
2079 case CmpInst::ICMP_UGE:
2080 case CmpInst::ICMP_ULT:
2081 case CmpInst::ICMP_ULE:
2082 // However, we have to switch them to their signed variants to handle
2083 // negative indices from the base pointer.
2084 Pred = ICmpInst::getSignedPredicate(Pred);
2085 break;
2086 }
2087
2088 // Strip off any constant offsets so that we can reason about them.
2089 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2090 // here and compare base addresses like AliasAnalysis does; however, there are
2091 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2092 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2093 // doesn't need to guarantee pointer inequality when it says NoAlias.
2094 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2095 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2096
2097 // If LHS and RHS are related via constant offsets to the same base
2098 // value, we can replace it with an icmp which just compares the offsets.
2099 if (LHS == RHS)
2100 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2101
2102 // Various optimizations for (in)equality comparisons.
2103 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2104 // Different non-empty allocations that exist at the same time have
2105 // different addresses (if the program can tell). Global variables always
2106 // exist, so they always exist during the lifetime of each other and all
2107 // allocas. Two different allocas usually have different addresses...
2108 //
2109 // However, if there's an @llvm.stackrestore dynamically in between two
2110 // allocas, they may have the same address. It's tempting to reduce the
2111 // scope of the problem by only looking at *static* allocas here. That would
2112 // cover the majority of allocas while significantly reducing the likelihood
2113 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2114 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2115 // an entry block. Also, if we have a block that's not attached to a
2116 // function, we can't tell if it's "static" under the current definition.
2117 // Theoretically, this problem could be fixed by creating a new
2118 // instruction kind specifically for static allocas. Such a new instruction
2119 // could be required to be at the top of the entry block, thus preventing it
2120 // from being subject to a @llvm.stackrestore. Instcombine could even
2121 // convert regular allocas into these special allocas. It'd be nifty.
2122 // However, until then, this problem remains open.
2123 //
2124 // So, we'll assume that two non-empty allocas have different addresses
2125 // for now.
2126 //
2127 // With all that, if the offsets are within the bounds of their allocations
2128 // (and not one-past-the-end! so we can't use inbounds!), and their
2129 // allocations aren't the same, the pointers are not equal.
2130 //
2131 // Note that it's not necessary to check for LHS being a global variable
2132 // address, due to canonicalization and constant folding.
2133 if (isa<AllocaInst>(LHS) &&
2134 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2135 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2136 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2137 uint64_t LHSSize, RHSSize;
2138 if (LHSOffsetCI && RHSOffsetCI &&
2139 getObjectSize(LHS, LHSSize, DL, TLI) &&
2140 getObjectSize(RHS, RHSSize, DL, TLI)) {
2141 const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2142 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2143 if (!LHSOffsetValue.isNegative() &&
2144 !RHSOffsetValue.isNegative() &&
2145 LHSOffsetValue.ult(LHSSize) &&
2146 RHSOffsetValue.ult(RHSSize)) {
2147 return ConstantInt::get(GetCompareTy(LHS),
2148 !CmpInst::isTrueWhenEqual(Pred));
2149 }
2150 }
2151
2152 // Repeat the above check but this time without depending on DataLayout
2153 // or being able to compute a precise size.
2154 if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2155 !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2156 LHSOffset->isNullValue() &&
2157 RHSOffset->isNullValue())
2158 return ConstantInt::get(GetCompareTy(LHS),
2159 !CmpInst::isTrueWhenEqual(Pred));
2160 }
2161
2162 // Even if a non-inbounds GEP occurs along the path we can still optimize
2163 // equality comparisons concerning the result. We avoid walking the whole
2164 // chain again by starting where the last calls to
2165 // stripAndComputeConstantOffsets left off and accumulate the offsets.
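// (The extra 'true' argument below presumably tells stripAndComputeConstantOffsets
// to look through non-inbounds GEPs as well; that is acceptable here because only
// equality facts are derived from the result.)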
2166 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2167 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2168 if (LHS == RHS)
2169 return ConstantExpr::getICmp(Pred,
2170 ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2171 ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2172
2173 // If one side of the equality comparison must come from a noalias call
2174 // (meaning a system memory allocation function), and the other side must
2175 // come from a pointer that cannot overlap with dynamically-allocated
2176 // memory within the lifetime of the current function (allocas, byval
2177 // arguments, globals), then determine the comparison result here.
2178 SmallVector<Value *, 8> LHSUObjs, RHSUObjs;
2179 GetUnderlyingObjects(LHS, LHSUObjs, DL);
2180 GetUnderlyingObjects(RHS, RHSUObjs, DL);
2181
2182 // Is the set of underlying objects all noalias calls?
2183 auto IsNAC = [](ArrayRef<Value *> Objects) {
2184 return all_of(Objects, isNoAliasCall);
2185 };
2186
2187 // Is the set of underlying objects all things which must be disjoint from
2188 // noalias calls? For allocas, we consider only static ones (dynamic
2189 // allocas might be transformed into calls to malloc not simultaneously
2190 // live with the compared-to allocation). For globals, we exclude symbols
2191 // that might be resolved lazily to symbols in another dynamically-loaded
2192 // library (and, thus, could be malloc'ed by the implementation).
2193 auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) {
2194 return all_of(Objects, [](Value *V) {
2195 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2196 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2197 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2198 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2199 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2200 !GV->isThreadLocal();
2201 if (const Argument *A = dyn_cast<Argument>(V))
2202 return A->hasByValAttr();
2203 return false;
2204 });
2205 };
2206
2207 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2208 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2209 return ConstantInt::get(GetCompareTy(LHS),
2210 !CmpInst::isTrueWhenEqual(Pred));
2211
2212 // Fold comparisons for a non-escaping pointer even if the allocation call
2213 // cannot be elided. We cannot fold a malloc comparison to null. Also, the
2214 // dynamic allocation call could be either of the operands.
2215 Value *MI = nullptr;
2216 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
2217 MI = LHS;
2218 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
2219 MI = RHS;
2220 // FIXME: We should also fold the compare when the pointer escapes, but the
2221 // compare dominates the pointer escape.
2222 if (MI && !PointerMayBeCaptured(MI, true, true))
2223 return ConstantInt::get(GetCompareTy(LHS),
2224 CmpInst::isFalseWhenEqual(Pred));
2225 }
2226
2227 // Otherwise, fail.
2228 return nullptr;
2229 }
2230
2231 /// Fold an icmp when its operands have i1 scalar type.
2232 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2233 Value *RHS, const Query &Q) {
2234 Type *ITy = GetCompareTy(LHS); // The return type.
2235 Type *OpTy = LHS->getType(); // The operand type.
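// Only i1 (or vector-of-i1) operands are handled here. For such values the
// unsigned interpretation is 0/1 and the signed interpretation is 0/-1, which
// is what justifies folds such as "X >u 0 -> X" and "X <s 0 -> X" below.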
2236 if (!OpTy->getScalarType()->isIntegerTy(1)) 2237 return nullptr; 2238 2239 switch (Pred) { 2240 default: 2241 break; 2242 case ICmpInst::ICMP_EQ: 2243 // X == 1 -> X 2244 if (match(RHS, m_One())) 2245 return LHS; 2246 break; 2247 case ICmpInst::ICMP_NE: 2248 // X != 0 -> X 2249 if (match(RHS, m_Zero())) 2250 return LHS; 2251 break; 2252 case ICmpInst::ICMP_UGT: 2253 // X >u 0 -> X 2254 if (match(RHS, m_Zero())) 2255 return LHS; 2256 break; 2257 case ICmpInst::ICMP_UGE: 2258 // X >=u 1 -> X 2259 if (match(RHS, m_One())) 2260 return LHS; 2261 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false)) 2262 return getTrue(ITy); 2263 break; 2264 case ICmpInst::ICMP_SGE: 2265 /// For signed comparison, the values for an i1 are 0 and -1 2266 /// respectively. This maps into a truth table of: 2267 /// LHS | RHS | LHS >=s RHS | LHS implies RHS 2268 /// 0 | 0 | 1 (0 >= 0) | 1 2269 /// 0 | 1 | 1 (0 >= -1) | 1 2270 /// 1 | 0 | 0 (-1 >= 0) | 0 2271 /// 1 | 1 | 1 (-1 >= -1) | 1 2272 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2273 return getTrue(ITy); 2274 break; 2275 case ICmpInst::ICMP_SLT: 2276 // X <s 0 -> X 2277 if (match(RHS, m_Zero())) 2278 return LHS; 2279 break; 2280 case ICmpInst::ICMP_SLE: 2281 // X <=s -1 -> X 2282 if (match(RHS, m_One())) 2283 return LHS; 2284 break; 2285 case ICmpInst::ICMP_ULE: 2286 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2287 return getTrue(ITy); 2288 break; 2289 } 2290 2291 return nullptr; 2292 } 2293 2294 /// Try hard to fold icmp with zero RHS because this is a common case. 2295 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, 2296 Value *RHS, const Query &Q) { 2297 if (!match(RHS, m_Zero())) 2298 return nullptr; 2299 2300 Type *ITy = GetCompareTy(LHS); // The return type. 
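// The unsigned orderings below follow from the fact that no unsigned value is
// less than zero, combined with knowledge of whether LHS is known non-zero;
// the signed cases consult the known sign bit of LHS, e.g. if the sign bit is
// known to be set then "icmp slt %x, 0" folds to true.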
2301 bool LHSKnownNonNegative, LHSKnownNegative; 2302 switch (Pred) { 2303 default: 2304 llvm_unreachable("Unknown ICmp predicate!"); 2305 case ICmpInst::ICMP_ULT: 2306 return getFalse(ITy); 2307 case ICmpInst::ICMP_UGE: 2308 return getTrue(ITy); 2309 case ICmpInst::ICMP_EQ: 2310 case ICmpInst::ICMP_ULE: 2311 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2312 return getFalse(ITy); 2313 break; 2314 case ICmpInst::ICMP_NE: 2315 case ICmpInst::ICMP_UGT: 2316 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2317 return getTrue(ITy); 2318 break; 2319 case ICmpInst::ICMP_SLT: 2320 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC, 2321 Q.CxtI, Q.DT); 2322 if (LHSKnownNegative) 2323 return getTrue(ITy); 2324 if (LHSKnownNonNegative) 2325 return getFalse(ITy); 2326 break; 2327 case ICmpInst::ICMP_SLE: 2328 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC, 2329 Q.CxtI, Q.DT); 2330 if (LHSKnownNegative) 2331 return getTrue(ITy); 2332 if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2333 return getFalse(ITy); 2334 break; 2335 case ICmpInst::ICMP_SGE: 2336 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC, 2337 Q.CxtI, Q.DT); 2338 if (LHSKnownNegative) 2339 return getFalse(ITy); 2340 if (LHSKnownNonNegative) 2341 return getTrue(ITy); 2342 break; 2343 case ICmpInst::ICMP_SGT: 2344 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, Q.AC, 2345 Q.CxtI, Q.DT); 2346 if (LHSKnownNegative) 2347 return getFalse(ITy); 2348 if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2349 return getTrue(ITy); 2350 break; 2351 } 2352 2353 return nullptr; 2354 } 2355 2356 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS, 2357 Value *RHS) { 2358 const APInt *C; 2359 if (!match(RHS, m_APInt(C))) 2360 return nullptr; 2361 2362 // Rule out tautological comparisons (eg., ult 0 or uge 0). 2363 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C); 2364 if (RHS_CR.isEmptySet()) 2365 return ConstantInt::getFalse(GetCompareTy(RHS)); 2366 if (RHS_CR.isFullSet()) 2367 return ConstantInt::getTrue(GetCompareTy(RHS)); 2368 2369 // Many binary operators with constant RHS have easy to compute constant 2370 // range. Use them to check whether the comparison is a tautology. 2371 unsigned Width = C->getBitWidth(); 2372 APInt Lower = APInt(Width, 0); 2373 APInt Upper = APInt(Width, 0); 2374 const APInt *C2; 2375 if (match(LHS, m_URem(m_Value(), m_APInt(C2)))) { 2376 // 'urem x, C2' produces [0, C2). 2377 Upper = *C2; 2378 } else if (match(LHS, m_SRem(m_Value(), m_APInt(C2)))) { 2379 // 'srem x, C2' produces (-|C2|, |C2|). 2380 Upper = C2->abs(); 2381 Lower = (-Upper) + 1; 2382 } else if (match(LHS, m_UDiv(m_APInt(C2), m_Value()))) { 2383 // 'udiv C2, x' produces [0, C2]. 2384 Upper = *C2 + 1; 2385 } else if (match(LHS, m_UDiv(m_Value(), m_APInt(C2)))) { 2386 // 'udiv x, C2' produces [0, UINT_MAX / C2]. 2387 APInt NegOne = APInt::getAllOnesValue(Width); 2388 if (*C2 != 0) 2389 Upper = NegOne.udiv(*C2) + 1; 2390 } else if (match(LHS, m_SDiv(m_APInt(C2), m_Value()))) { 2391 if (C2->isMinSignedValue()) { 2392 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2]. 2393 Lower = *C2; 2394 Upper = Lower.lshr(1) + 1; 2395 } else { 2396 // 'sdiv C2, x' produces [-|C2|, |C2|]. 
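// For example, 'sdiv i32 5, %x' can only produce values in [-5, 5]; the
// magnitude of the quotient never exceeds the magnitude of the dividend.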
2397 Upper = C2->abs() + 1; 2398 Lower = (-Upper) + 1; 2399 } 2400 } else if (match(LHS, m_SDiv(m_Value(), m_APInt(C2)))) { 2401 APInt IntMin = APInt::getSignedMinValue(Width); 2402 APInt IntMax = APInt::getSignedMaxValue(Width); 2403 if (C2->isAllOnesValue()) { 2404 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX] 2405 // where C2 != -1 and C2 != 0 and C2 != 1 2406 Lower = IntMin + 1; 2407 Upper = IntMax + 1; 2408 } else if (C2->countLeadingZeros() < Width - 1) { 2409 // 'sdiv x, C2' produces [INT_MIN / C2, INT_MAX / C2] 2410 // where C2 != -1 and C2 != 0 and C2 != 1 2411 Lower = IntMin.sdiv(*C2); 2412 Upper = IntMax.sdiv(*C2); 2413 if (Lower.sgt(Upper)) 2414 std::swap(Lower, Upper); 2415 Upper = Upper + 1; 2416 assert(Upper != Lower && "Upper part of range has wrapped!"); 2417 } 2418 } else if (match(LHS, m_NUWShl(m_APInt(C2), m_Value()))) { 2419 // 'shl nuw C2, x' produces [C2, C2 << CLZ(C2)] 2420 Lower = *C2; 2421 Upper = Lower.shl(Lower.countLeadingZeros()) + 1; 2422 } else if (match(LHS, m_NSWShl(m_APInt(C2), m_Value()))) { 2423 if (C2->isNegative()) { 2424 // 'shl nsw C2, x' produces [C2 << CLO(C2)-1, C2] 2425 unsigned ShiftAmount = C2->countLeadingOnes() - 1; 2426 Lower = C2->shl(ShiftAmount); 2427 Upper = *C2 + 1; 2428 } else { 2429 // 'shl nsw C2, x' produces [C2, C2 << CLZ(C2)-1] 2430 unsigned ShiftAmount = C2->countLeadingZeros() - 1; 2431 Lower = *C2; 2432 Upper = C2->shl(ShiftAmount) + 1; 2433 } 2434 } else if (match(LHS, m_LShr(m_Value(), m_APInt(C2)))) { 2435 // 'lshr x, C2' produces [0, UINT_MAX >> C2]. 2436 APInt NegOne = APInt::getAllOnesValue(Width); 2437 if (C2->ult(Width)) 2438 Upper = NegOne.lshr(*C2) + 1; 2439 } else if (match(LHS, m_LShr(m_APInt(C2), m_Value()))) { 2440 // 'lshr C2, x' produces [C2 >> (Width-1), C2]. 2441 unsigned ShiftAmount = Width - 1; 2442 if (*C2 != 0 && cast<BinaryOperator>(LHS)->isExact()) 2443 ShiftAmount = C2->countTrailingZeros(); 2444 Lower = C2->lshr(ShiftAmount); 2445 Upper = *C2 + 1; 2446 } else if (match(LHS, m_AShr(m_Value(), m_APInt(C2)))) { 2447 // 'ashr x, C2' produces [INT_MIN >> C2, INT_MAX >> C2]. 2448 APInt IntMin = APInt::getSignedMinValue(Width); 2449 APInt IntMax = APInt::getSignedMaxValue(Width); 2450 if (C2->ult(Width)) { 2451 Lower = IntMin.ashr(*C2); 2452 Upper = IntMax.ashr(*C2) + 1; 2453 } 2454 } else if (match(LHS, m_AShr(m_APInt(C2), m_Value()))) { 2455 unsigned ShiftAmount = Width - 1; 2456 if (*C2 != 0 && cast<BinaryOperator>(LHS)->isExact()) 2457 ShiftAmount = C2->countTrailingZeros(); 2458 if (C2->isNegative()) { 2459 // 'ashr C2, x' produces [C2, C2 >> (Width-1)] 2460 Lower = *C2; 2461 Upper = C2->ashr(ShiftAmount) + 1; 2462 } else { 2463 // 'ashr C2, x' produces [C2 >> (Width-1), C2] 2464 Lower = C2->ashr(ShiftAmount); 2465 Upper = *C2 + 1; 2466 } 2467 } else if (match(LHS, m_Or(m_Value(), m_APInt(C2)))) { 2468 // 'or x, C2' produces [C2, UINT_MAX]. 2469 Lower = *C2; 2470 } else if (match(LHS, m_And(m_Value(), m_APInt(C2)))) { 2471 // 'and x, C2' produces [0, C2]. 2472 Upper = *C2 + 1; 2473 } else if (match(LHS, m_NUWAdd(m_Value(), m_APInt(C2)))) { 2474 // 'add nuw x, C2' produces [C2, UINT_MAX]. 2475 Lower = *C2; 2476 } 2477 2478 ConstantRange LHS_CR = 2479 Lower != Upper ? 
ConstantRange(Lower, Upper) : ConstantRange(Width, true); 2480 2481 if (auto *I = dyn_cast<Instruction>(LHS)) 2482 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range)) 2483 LHS_CR = LHS_CR.intersectWith(getConstantRangeFromMetadata(*Ranges)); 2484 2485 if (!LHS_CR.isFullSet()) { 2486 if (RHS_CR.contains(LHS_CR)) 2487 return ConstantInt::getTrue(GetCompareTy(RHS)); 2488 if (RHS_CR.inverse().contains(LHS_CR)) 2489 return ConstantInt::getFalse(GetCompareTy(RHS)); 2490 } 2491 2492 return nullptr; 2493 } 2494 2495 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS, 2496 Value *RHS, const Query &Q, 2497 unsigned MaxRecurse) { 2498 Type *ITy = GetCompareTy(LHS); // The return type. 2499 2500 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS); 2501 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS); 2502 if (MaxRecurse && (LBO || RBO)) { 2503 // Analyze the case when either LHS or RHS is an add instruction. 2504 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 2505 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null). 2506 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false; 2507 if (LBO && LBO->getOpcode() == Instruction::Add) { 2508 A = LBO->getOperand(0); 2509 B = LBO->getOperand(1); 2510 NoLHSWrapProblem = 2511 ICmpInst::isEquality(Pred) || 2512 (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) || 2513 (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap()); 2514 } 2515 if (RBO && RBO->getOpcode() == Instruction::Add) { 2516 C = RBO->getOperand(0); 2517 D = RBO->getOperand(1); 2518 NoRHSWrapProblem = 2519 ICmpInst::isEquality(Pred) || 2520 (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) || 2521 (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap()); 2522 } 2523 2524 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 2525 if ((A == RHS || B == RHS) && NoLHSWrapProblem) 2526 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A, 2527 Constant::getNullValue(RHS->getType()), Q, 2528 MaxRecurse - 1)) 2529 return V; 2530 2531 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 2532 if ((C == LHS || D == LHS) && NoRHSWrapProblem) 2533 if (Value *V = 2534 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()), 2535 C == LHS ? D : C, Q, MaxRecurse - 1)) 2536 return V; 2537 2538 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow. 2539 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem && 2540 NoRHSWrapProblem) { 2541 // Determine Y and Z in the form icmp (X+Y), (X+Z). 
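// The four cases below only differ in which operand plays the common term X;
// e.g. if A == C then X is A, Y is B and Z is D.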
2542 Value *Y, *Z; 2543 if (A == C) { 2544 // C + B == C + D -> B == D 2545 Y = B; 2546 Z = D; 2547 } else if (A == D) { 2548 // D + B == C + D -> B == C 2549 Y = B; 2550 Z = C; 2551 } else if (B == C) { 2552 // A + C == C + D -> A == D 2553 Y = A; 2554 Z = D; 2555 } else { 2556 assert(B == D); 2557 // A + D == C + D -> A == C 2558 Y = A; 2559 Z = C; 2560 } 2561 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1)) 2562 return V; 2563 } 2564 } 2565 2566 { 2567 Value *Y = nullptr; 2568 // icmp pred (or X, Y), X 2569 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) { 2570 if (Pred == ICmpInst::ICMP_ULT) 2571 return getFalse(ITy); 2572 if (Pred == ICmpInst::ICMP_UGE) 2573 return getTrue(ITy); 2574 2575 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) { 2576 bool RHSKnownNonNegative, RHSKnownNegative; 2577 bool YKnownNonNegative, YKnownNegative; 2578 ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, Q.DL, 0, 2579 Q.AC, Q.CxtI, Q.DT); 2580 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC, 2581 Q.CxtI, Q.DT); 2582 if (RHSKnownNonNegative && YKnownNegative) 2583 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy); 2584 if (RHSKnownNegative || YKnownNonNegative) 2585 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy); 2586 } 2587 } 2588 // icmp pred X, (or X, Y) 2589 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) { 2590 if (Pred == ICmpInst::ICMP_ULE) 2591 return getTrue(ITy); 2592 if (Pred == ICmpInst::ICMP_UGT) 2593 return getFalse(ITy); 2594 2595 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) { 2596 bool LHSKnownNonNegative, LHSKnownNegative; 2597 bool YKnownNonNegative, YKnownNegative; 2598 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL, 0, 2599 Q.AC, Q.CxtI, Q.DT); 2600 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Q.DL, 0, Q.AC, 2601 Q.CxtI, Q.DT); 2602 if (LHSKnownNonNegative && YKnownNegative) 2603 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy); 2604 if (LHSKnownNegative || YKnownNonNegative) 2605 return Pred == ICmpInst::ICMP_SGT ? 
getFalse(ITy) : getTrue(ITy); 2606 } 2607 } 2608 } 2609 2610 // icmp pred (and X, Y), X 2611 if (LBO && match(LBO, m_CombineOr(m_And(m_Value(), m_Specific(RHS)), 2612 m_And(m_Specific(RHS), m_Value())))) { 2613 if (Pred == ICmpInst::ICMP_UGT) 2614 return getFalse(ITy); 2615 if (Pred == ICmpInst::ICMP_ULE) 2616 return getTrue(ITy); 2617 } 2618 // icmp pred X, (and X, Y) 2619 if (RBO && match(RBO, m_CombineOr(m_And(m_Value(), m_Specific(LHS)), 2620 m_And(m_Specific(LHS), m_Value())))) { 2621 if (Pred == ICmpInst::ICMP_UGE) 2622 return getTrue(ITy); 2623 if (Pred == ICmpInst::ICMP_ULT) 2624 return getFalse(ITy); 2625 } 2626 2627 // 0 - (zext X) pred C 2628 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) { 2629 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) { 2630 if (RHSC->getValue().isStrictlyPositive()) { 2631 if (Pred == ICmpInst::ICMP_SLT) 2632 return ConstantInt::getTrue(RHSC->getContext()); 2633 if (Pred == ICmpInst::ICMP_SGE) 2634 return ConstantInt::getFalse(RHSC->getContext()); 2635 if (Pred == ICmpInst::ICMP_EQ) 2636 return ConstantInt::getFalse(RHSC->getContext()); 2637 if (Pred == ICmpInst::ICMP_NE) 2638 return ConstantInt::getTrue(RHSC->getContext()); 2639 } 2640 if (RHSC->getValue().isNonNegative()) { 2641 if (Pred == ICmpInst::ICMP_SLE) 2642 return ConstantInt::getTrue(RHSC->getContext()); 2643 if (Pred == ICmpInst::ICMP_SGT) 2644 return ConstantInt::getFalse(RHSC->getContext()); 2645 } 2646 } 2647 } 2648 2649 // icmp pred (urem X, Y), Y 2650 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) { 2651 bool KnownNonNegative, KnownNegative; 2652 switch (Pred) { 2653 default: 2654 break; 2655 case ICmpInst::ICMP_SGT: 2656 case ICmpInst::ICMP_SGE: 2657 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC, 2658 Q.CxtI, Q.DT); 2659 if (!KnownNonNegative) 2660 break; 2661 LLVM_FALLTHROUGH; 2662 case ICmpInst::ICMP_EQ: 2663 case ICmpInst::ICMP_UGT: 2664 case ICmpInst::ICMP_UGE: 2665 return getFalse(ITy); 2666 case ICmpInst::ICMP_SLT: 2667 case ICmpInst::ICMP_SLE: 2668 ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC, 2669 Q.CxtI, Q.DT); 2670 if (!KnownNonNegative) 2671 break; 2672 LLVM_FALLTHROUGH; 2673 case ICmpInst::ICMP_NE: 2674 case ICmpInst::ICMP_ULT: 2675 case ICmpInst::ICMP_ULE: 2676 return getTrue(ITy); 2677 } 2678 } 2679 2680 // icmp pred X, (urem Y, X) 2681 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) { 2682 bool KnownNonNegative, KnownNegative; 2683 switch (Pred) { 2684 default: 2685 break; 2686 case ICmpInst::ICMP_SGT: 2687 case ICmpInst::ICMP_SGE: 2688 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC, 2689 Q.CxtI, Q.DT); 2690 if (!KnownNonNegative) 2691 break; 2692 LLVM_FALLTHROUGH; 2693 case ICmpInst::ICMP_NE: 2694 case ICmpInst::ICMP_UGT: 2695 case ICmpInst::ICMP_UGE: 2696 return getTrue(ITy); 2697 case ICmpInst::ICMP_SLT: 2698 case ICmpInst::ICMP_SLE: 2699 ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL, 0, Q.AC, 2700 Q.CxtI, Q.DT); 2701 if (!KnownNonNegative) 2702 break; 2703 LLVM_FALLTHROUGH; 2704 case ICmpInst::ICMP_EQ: 2705 case ICmpInst::ICMP_ULT: 2706 case ICmpInst::ICMP_ULE: 2707 return getFalse(ITy); 2708 } 2709 } 2710 2711 // x >> y <=u x 2712 // x udiv y <=u x. 
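// (Both facts hold because lshr and udiv can only decrease an unsigned value
// or leave it unchanged.)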
2713 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) || 2714 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) { 2715 // icmp pred (X op Y), X 2716 if (Pred == ICmpInst::ICMP_UGT) 2717 return getFalse(ITy); 2718 if (Pred == ICmpInst::ICMP_ULE) 2719 return getTrue(ITy); 2720 } 2721 2722 // x >=u x >> y 2723 // x >=u x udiv y. 2724 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) || 2725 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) { 2726 // icmp pred X, (X op Y) 2727 if (Pred == ICmpInst::ICMP_ULT) 2728 return getFalse(ITy); 2729 if (Pred == ICmpInst::ICMP_UGE) 2730 return getTrue(ITy); 2731 } 2732 2733 // handle: 2734 // CI2 << X == CI 2735 // CI2 << X != CI 2736 // 2737 // where CI2 is a power of 2 and CI isn't 2738 if (auto *CI = dyn_cast<ConstantInt>(RHS)) { 2739 const APInt *CI2Val, *CIVal = &CI->getValue(); 2740 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) && 2741 CI2Val->isPowerOf2()) { 2742 if (!CIVal->isPowerOf2()) { 2743 // CI2 << X can equal zero in some circumstances, 2744 // this simplification is unsafe if CI is zero. 2745 // 2746 // We know it is safe if: 2747 // - The shift is nsw, we can't shift out the one bit. 2748 // - The shift is nuw, we can't shift out the one bit. 2749 // - CI2 is one 2750 // - CI isn't zero 2751 if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() || 2752 *CI2Val == 1 || !CI->isZero()) { 2753 if (Pred == ICmpInst::ICMP_EQ) 2754 return ConstantInt::getFalse(RHS->getContext()); 2755 if (Pred == ICmpInst::ICMP_NE) 2756 return ConstantInt::getTrue(RHS->getContext()); 2757 } 2758 } 2759 if (CIVal->isSignBit() && *CI2Val == 1) { 2760 if (Pred == ICmpInst::ICMP_UGT) 2761 return ConstantInt::getFalse(RHS->getContext()); 2762 if (Pred == ICmpInst::ICMP_ULE) 2763 return ConstantInt::getTrue(RHS->getContext()); 2764 } 2765 } 2766 } 2767 2768 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() && 2769 LBO->getOperand(1) == RBO->getOperand(1)) { 2770 switch (LBO->getOpcode()) { 2771 default: 2772 break; 2773 case Instruction::UDiv: 2774 case Instruction::LShr: 2775 if (ICmpInst::isSigned(Pred)) 2776 break; 2777 LLVM_FALLTHROUGH; 2778 case Instruction::SDiv: 2779 case Instruction::AShr: 2780 if (!LBO->isExact() || !RBO->isExact()) 2781 break; 2782 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2783 RBO->getOperand(0), Q, MaxRecurse - 1)) 2784 return V; 2785 break; 2786 case Instruction::Shl: { 2787 bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap(); 2788 bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap(); 2789 if (!NUW && !NSW) 2790 break; 2791 if (!NSW && ICmpInst::isSigned(Pred)) 2792 break; 2793 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2794 RBO->getOperand(0), Q, MaxRecurse - 1)) 2795 return V; 2796 break; 2797 } 2798 } 2799 } 2800 return nullptr; 2801 } 2802 2803 /// Simplify comparisons corresponding to integer min/max idioms. 2804 static Value *simplifyMinMax(CmpInst::Predicate Pred, Value *LHS, Value *RHS, 2805 const Query &Q, unsigned MaxRecurse) { 2806 Type *ITy = GetCompareTy(LHS); // The return type. 2807 Value *A, *B; 2808 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE; 2809 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B". 2810 2811 // Signed variants on "max(a,b)>=a -> true". 2812 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 2813 if (A != RHS) 2814 std::swap(A, B); // smax(A, B) pred A. 2815 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 
2816 // We analyze this as smax(A, B) pred A. 2817 P = Pred; 2818 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) && 2819 (A == LHS || B == LHS)) { 2820 if (A != LHS) 2821 std::swap(A, B); // A pred smax(A, B). 2822 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 2823 // We analyze this as smax(A, B) swapped-pred A. 2824 P = CmpInst::getSwappedPredicate(Pred); 2825 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 2826 (A == RHS || B == RHS)) { 2827 if (A != RHS) 2828 std::swap(A, B); // smin(A, B) pred A. 2829 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 2830 // We analyze this as smax(-A, -B) swapped-pred -A. 2831 // Note that we do not need to actually form -A or -B thanks to EqP. 2832 P = CmpInst::getSwappedPredicate(Pred); 2833 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) && 2834 (A == LHS || B == LHS)) { 2835 if (A != LHS) 2836 std::swap(A, B); // A pred smin(A, B). 2837 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 2838 // We analyze this as smax(-A, -B) pred -A. 2839 // Note that we do not need to actually form -A or -B thanks to EqP. 2840 P = Pred; 2841 } 2842 if (P != CmpInst::BAD_ICMP_PREDICATE) { 2843 // Cases correspond to "max(A, B) p A". 2844 switch (P) { 2845 default: 2846 break; 2847 case CmpInst::ICMP_EQ: 2848 case CmpInst::ICMP_SLE: 2849 // Equivalent to "A EqP B". This may be the same as the condition tested 2850 // in the max/min; if so, we can just return that. 2851 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 2852 return V; 2853 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 2854 return V; 2855 // Otherwise, see if "A EqP B" simplifies. 2856 if (MaxRecurse) 2857 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 2858 return V; 2859 break; 2860 case CmpInst::ICMP_NE: 2861 case CmpInst::ICMP_SGT: { 2862 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 2863 // Equivalent to "A InvEqP B". This may be the same as the condition 2864 // tested in the max/min; if so, we can just return that. 2865 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 2866 return V; 2867 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 2868 return V; 2869 // Otherwise, see if "A InvEqP B" simplifies. 2870 if (MaxRecurse) 2871 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 2872 return V; 2873 break; 2874 } 2875 case CmpInst::ICMP_SGE: 2876 // Always true. 2877 return getTrue(ITy); 2878 case CmpInst::ICMP_SLT: 2879 // Always false. 2880 return getFalse(ITy); 2881 } 2882 } 2883 2884 // Unsigned variants on "max(a,b)>=a -> true". 2885 P = CmpInst::BAD_ICMP_PREDICATE; 2886 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 2887 if (A != RHS) 2888 std::swap(A, B); // umax(A, B) pred A. 2889 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 2890 // We analyze this as umax(A, B) pred A. 2891 P = Pred; 2892 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) && 2893 (A == LHS || B == LHS)) { 2894 if (A != LHS) 2895 std::swap(A, B); // A pred umax(A, B). 2896 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 2897 // We analyze this as umax(A, B) swapped-pred A. 2898 P = CmpInst::getSwappedPredicate(Pred); 2899 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 2900 (A == RHS || B == RHS)) { 2901 if (A != RHS) 2902 std::swap(A, B); // umin(A, B) pred A. 2903 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 2904 // We analyze this as umax(-A, -B) swapped-pred -A. 
2905 // Note that we do not need to actually form -A or -B thanks to EqP. 2906 P = CmpInst::getSwappedPredicate(Pred); 2907 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) && 2908 (A == LHS || B == LHS)) { 2909 if (A != LHS) 2910 std::swap(A, B); // A pred umin(A, B). 2911 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 2912 // We analyze this as umax(-A, -B) pred -A. 2913 // Note that we do not need to actually form -A or -B thanks to EqP. 2914 P = Pred; 2915 } 2916 if (P != CmpInst::BAD_ICMP_PREDICATE) { 2917 // Cases correspond to "max(A, B) p A". 2918 switch (P) { 2919 default: 2920 break; 2921 case CmpInst::ICMP_EQ: 2922 case CmpInst::ICMP_ULE: 2923 // Equivalent to "A EqP B". This may be the same as the condition tested 2924 // in the max/min; if so, we can just return that. 2925 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 2926 return V; 2927 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 2928 return V; 2929 // Otherwise, see if "A EqP B" simplifies. 2930 if (MaxRecurse) 2931 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 2932 return V; 2933 break; 2934 case CmpInst::ICMP_NE: 2935 case CmpInst::ICMP_UGT: { 2936 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 2937 // Equivalent to "A InvEqP B". This may be the same as the condition 2938 // tested in the max/min; if so, we can just return that. 2939 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 2940 return V; 2941 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 2942 return V; 2943 // Otherwise, see if "A InvEqP B" simplifies. 2944 if (MaxRecurse) 2945 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 2946 return V; 2947 break; 2948 } 2949 case CmpInst::ICMP_UGE: 2950 // Always true. 2951 return getTrue(ITy); 2952 case CmpInst::ICMP_ULT: 2953 // Always false. 2954 return getFalse(ITy); 2955 } 2956 } 2957 2958 // Variants on "max(x,y) >= min(x,z)". 2959 Value *C, *D; 2960 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && 2961 match(RHS, m_SMin(m_Value(C), m_Value(D))) && 2962 (A == C || A == D || B == C || B == D)) { 2963 // max(x, ?) pred min(x, ?). 2964 if (Pred == CmpInst::ICMP_SGE) 2965 // Always true. 2966 return getTrue(ITy); 2967 if (Pred == CmpInst::ICMP_SLT) 2968 // Always false. 2969 return getFalse(ITy); 2970 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 2971 match(RHS, m_SMax(m_Value(C), m_Value(D))) && 2972 (A == C || A == D || B == C || B == D)) { 2973 // min(x, ?) pred max(x, ?). 2974 if (Pred == CmpInst::ICMP_SLE) 2975 // Always true. 2976 return getTrue(ITy); 2977 if (Pred == CmpInst::ICMP_SGT) 2978 // Always false. 2979 return getFalse(ITy); 2980 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && 2981 match(RHS, m_UMin(m_Value(C), m_Value(D))) && 2982 (A == C || A == D || B == C || B == D)) { 2983 // max(x, ?) pred min(x, ?). 2984 if (Pred == CmpInst::ICMP_UGE) 2985 // Always true. 2986 return getTrue(ITy); 2987 if (Pred == CmpInst::ICMP_ULT) 2988 // Always false. 2989 return getFalse(ITy); 2990 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 2991 match(RHS, m_UMax(m_Value(C), m_Value(D))) && 2992 (A == C || A == D || B == C || B == D)) { 2993 // min(x, ?) pred max(x, ?). 2994 if (Pred == CmpInst::ICMP_ULE) 2995 // Always true. 2996 return getTrue(ITy); 2997 if (Pred == CmpInst::ICMP_UGT) 2998 // Always false. 2999 return getFalse(ITy); 3000 } 3001 3002 return nullptr; 3003 } 3004 3005 /// Given operands for an ICmpInst, see if we can fold the result. 
3006 /// If not, this returns null. 3007 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3008 const Query &Q, unsigned MaxRecurse) { 3009 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3010 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!"); 3011 3012 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3013 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3014 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3015 3016 // If we have a constant, make sure it is on the RHS. 3017 std::swap(LHS, RHS); 3018 Pred = CmpInst::getSwappedPredicate(Pred); 3019 } 3020 3021 Type *ITy = GetCompareTy(LHS); // The return type. 3022 3023 // icmp X, X -> true/false 3024 // X icmp undef -> true/false. For example, icmp ugt %X, undef -> false 3025 // because X could be 0. 3026 if (LHS == RHS || isa<UndefValue>(RHS)) 3027 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred)); 3028 3029 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q)) 3030 return V; 3031 3032 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q)) 3033 return V; 3034 3035 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS)) 3036 return V; 3037 3038 // If both operands have range metadata, use the metadata 3039 // to simplify the comparison. 3040 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) { 3041 auto RHS_Instr = dyn_cast<Instruction>(RHS); 3042 auto LHS_Instr = dyn_cast<Instruction>(LHS); 3043 3044 if (RHS_Instr->getMetadata(LLVMContext::MD_range) && 3045 LHS_Instr->getMetadata(LLVMContext::MD_range)) { 3046 auto RHS_CR = getConstantRangeFromMetadata( 3047 *RHS_Instr->getMetadata(LLVMContext::MD_range)); 3048 auto LHS_CR = getConstantRangeFromMetadata( 3049 *LHS_Instr->getMetadata(LLVMContext::MD_range)); 3050 3051 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR); 3052 if (Satisfied_CR.contains(LHS_CR)) 3053 return ConstantInt::getTrue(RHS->getContext()); 3054 3055 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion( 3056 CmpInst::getInversePredicate(Pred), RHS_CR); 3057 if (InversedSatisfied_CR.contains(LHS_CR)) 3058 return ConstantInt::getFalse(RHS->getContext()); 3059 } 3060 } 3061 3062 // Compare of cast, for example (zext X) != 0 -> X != 0 3063 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) { 3064 Instruction *LI = cast<CastInst>(LHS); 3065 Value *SrcOp = LI->getOperand(0); 3066 Type *SrcTy = SrcOp->getType(); 3067 Type *DstTy = LI->getType(); 3068 3069 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input 3070 // if the integer type is the same size as the pointer type. 3071 if (MaxRecurse && isa<PtrToIntInst>(LI) && 3072 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) { 3073 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 3074 // Transfer the cast to the constant. 3075 if (Value *V = SimplifyICmpInst(Pred, SrcOp, 3076 ConstantExpr::getIntToPtr(RHSC, SrcTy), 3077 Q, MaxRecurse-1)) 3078 return V; 3079 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) { 3080 if (RI->getOperand(0)->getType() == SrcTy) 3081 // Compare without the cast. 3082 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0), 3083 Q, MaxRecurse-1)) 3084 return V; 3085 } 3086 } 3087 3088 if (isa<ZExtInst>(LHS)) { 3089 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the 3090 // same type. 3091 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) { 3092 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType()) 3093 // Compare X and Y. 
Note that signed predicates become unsigned.
3094 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3095 SrcOp, RI->getOperand(0), Q,
3096 MaxRecurse-1))
3097 return V;
3098 }
3099 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3100 // too. If not, then try to deduce the result of the comparison.
3101 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3102 // Compute the constant that would happen if we truncated to SrcTy then
3103 // reextended to DstTy.
3104 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3105 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3106
3107 // If the re-extended constant didn't change then this is effectively
3108 // also a case of comparing two zero-extended values.
3109 if (RExt == CI && MaxRecurse)
3110 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3111 SrcOp, Trunc, Q, MaxRecurse-1))
3112 return V;
3113
3114 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3115 // there. Use this to work out the result of the comparison.
3116 if (RExt != CI) {
3117 switch (Pred) {
3118 default: llvm_unreachable("Unknown ICmp predicate!");
3119 // LHS <u RHS.
3120 case ICmpInst::ICMP_EQ:
3121 case ICmpInst::ICMP_UGT:
3122 case ICmpInst::ICMP_UGE:
3123 return ConstantInt::getFalse(CI->getContext());
3124
3125 case ICmpInst::ICMP_NE:
3126 case ICmpInst::ICMP_ULT:
3127 case ICmpInst::ICMP_ULE:
3128 return ConstantInt::getTrue(CI->getContext());
3129
3130 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3131 // is non-negative then LHS <s RHS.
3132 case ICmpInst::ICMP_SGT:
3133 case ICmpInst::ICMP_SGE:
3134 return CI->getValue().isNegative() ?
3135 ConstantInt::getTrue(CI->getContext()) :
3136 ConstantInt::getFalse(CI->getContext());
3137
3138 case ICmpInst::ICMP_SLT:
3139 case ICmpInst::ICMP_SLE:
3140 return CI->getValue().isNegative() ?
3141 ConstantInt::getFalse(CI->getContext()) :
3142 ConstantInt::getTrue(CI->getContext());
3143 }
3144 }
3145 }
3146 }
3147
3148 if (isa<SExtInst>(LHS)) {
3149 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3150 // same type.
3151 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3152 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3153 // Compare X and Y. Note that the predicate does not change.
3154 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3155 Q, MaxRecurse-1))
3156 return V;
3157 }
3158 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3159 // too. If not, then try to deduce the result of the comparison.
3160 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3161 // Compute the constant that would happen if we truncated to SrcTy then
3162 // reextended to DstTy.
3163 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3164 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3165
3166 // If the re-extended constant didn't change then this is effectively
3167 // also a case of comparing two sign-extended values.
3168 if (RExt == CI && MaxRecurse)
3169 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3170 return V;
3171
3172 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3173 // bits there. Use this to work out the result of the comparison.
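      // For example (names illustrative): with %x : i8, "icmp slt (sext i8 %x
      // to i32), 200" is always true because the sign-extended value lies in
      // [-128, 127], while "icmp ugt (sext i8 %x to i32), 200" holds exactly
      // when %x is negative, i.e. it reduces to "icmp slt i8 %x, 0".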
3174 if (RExt != CI) { 3175 switch (Pred) { 3176 default: llvm_unreachable("Unknown ICmp predicate!"); 3177 case ICmpInst::ICMP_EQ: 3178 return ConstantInt::getFalse(CI->getContext()); 3179 case ICmpInst::ICMP_NE: 3180 return ConstantInt::getTrue(CI->getContext()); 3181 3182 // If RHS is non-negative then LHS <s RHS. If RHS is negative then 3183 // LHS >s RHS. 3184 case ICmpInst::ICMP_SGT: 3185 case ICmpInst::ICMP_SGE: 3186 return CI->getValue().isNegative() ? 3187 ConstantInt::getTrue(CI->getContext()) : 3188 ConstantInt::getFalse(CI->getContext()); 3189 case ICmpInst::ICMP_SLT: 3190 case ICmpInst::ICMP_SLE: 3191 return CI->getValue().isNegative() ? 3192 ConstantInt::getFalse(CI->getContext()) : 3193 ConstantInt::getTrue(CI->getContext()); 3194 3195 // If LHS is non-negative then LHS <u RHS. If LHS is negative then 3196 // LHS >u RHS. 3197 case ICmpInst::ICMP_UGT: 3198 case ICmpInst::ICMP_UGE: 3199 // Comparison is true iff the LHS <s 0. 3200 if (MaxRecurse) 3201 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp, 3202 Constant::getNullValue(SrcTy), 3203 Q, MaxRecurse-1)) 3204 return V; 3205 break; 3206 case ICmpInst::ICMP_ULT: 3207 case ICmpInst::ICMP_ULE: 3208 // Comparison is true iff the LHS >=s 0. 3209 if (MaxRecurse) 3210 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp, 3211 Constant::getNullValue(SrcTy), 3212 Q, MaxRecurse-1)) 3213 return V; 3214 break; 3215 } 3216 } 3217 } 3218 } 3219 } 3220 3221 // icmp eq|ne X, Y -> false|true if X != Y 3222 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) && 3223 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) { 3224 LLVMContext &Ctx = LHS->getType()->getContext(); 3225 return Pred == ICmpInst::ICMP_NE ? 3226 ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx); 3227 } 3228 3229 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse)) 3230 return V; 3231 3232 if (Value *V = simplifyMinMax(Pred, LHS, RHS, Q, MaxRecurse)) 3233 return V; 3234 3235 // Simplify comparisons of related pointers using a powerful, recursive 3236 // GEP-walk when we have target data available.. 3237 if (LHS->getType()->isPointerTy()) 3238 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI, LHS, RHS)) 3239 return C; 3240 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS)) 3241 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS)) 3242 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) == 3243 Q.DL.getTypeSizeInBits(CLHS->getType()) && 3244 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) == 3245 Q.DL.getTypeSizeInBits(CRHS->getType())) 3246 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI, 3247 CLHS->getPointerOperand(), 3248 CRHS->getPointerOperand())) 3249 return C; 3250 3251 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) { 3252 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) { 3253 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() && 3254 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() && 3255 (ICmpInst::isEquality(Pred) || 3256 (GLHS->isInBounds() && GRHS->isInBounds() && 3257 Pred == ICmpInst::getSignedPredicate(Pred)))) { 3258 // The bases are equal and the indices are constant. Build a constant 3259 // expression GEP with the same indices and a null base pointer to see 3260 // what constant folding can make out of it. 
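        // For example (names illustrative): comparing "gep inbounds i32, i32* %p, i64 1"
        // with "gep inbounds i32, i32* %p, i64 2" becomes an icmp of the constant
        // expressions "gep i32, i32* null, i64 1" and "gep i32, i32* null, i64 2",
        // which constant folding can evaluate.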
3261 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType()); 3262 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end()); 3263 Constant *NewLHS = ConstantExpr::getGetElementPtr( 3264 GLHS->getSourceElementType(), Null, IndicesLHS); 3265 3266 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end()); 3267 Constant *NewRHS = ConstantExpr::getGetElementPtr( 3268 GLHS->getSourceElementType(), Null, IndicesRHS); 3269 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS); 3270 } 3271 } 3272 } 3273 3274 // If a bit is known to be zero for A and known to be one for B, 3275 // then A and B cannot be equal. 3276 if (ICmpInst::isEquality(Pred)) { 3277 const APInt *RHSVal; 3278 if (match(RHS, m_APInt(RHSVal))) { 3279 unsigned BitWidth = RHSVal->getBitWidth(); 3280 APInt LHSKnownZero(BitWidth, 0); 3281 APInt LHSKnownOne(BitWidth, 0); 3282 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, Q.DL, /*Depth=*/0, Q.AC, 3283 Q.CxtI, Q.DT); 3284 if (((LHSKnownZero & *RHSVal) != 0) || ((LHSKnownOne & ~(*RHSVal)) != 0)) 3285 return Pred == ICmpInst::ICMP_EQ ? ConstantInt::getFalse(ITy) 3286 : ConstantInt::getTrue(ITy); 3287 } 3288 } 3289 3290 // If the comparison is with the result of a select instruction, check whether 3291 // comparing with either branch of the select always yields the same value. 3292 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3293 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3294 return V; 3295 3296 // If the comparison is with the result of a phi instruction, check whether 3297 // doing the compare with each incoming phi value yields a common result. 3298 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 3299 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 3300 return V; 3301 3302 return nullptr; 3303 } 3304 3305 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3306 const DataLayout &DL, 3307 const TargetLibraryInfo *TLI, 3308 const DominatorTree *DT, AssumptionCache *AC, 3309 const Instruction *CxtI) { 3310 return ::SimplifyICmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI), 3311 RecursionLimit); 3312 } 3313 3314 /// Given operands for an FCmpInst, see if we can fold the result. 3315 /// If not, this returns null. 3316 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3317 FastMathFlags FMF, const Query &Q, 3318 unsigned MaxRecurse) { 3319 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3320 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!"); 3321 3322 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3323 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3324 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3325 3326 // If we have a constant, make sure it is on the RHS. 3327 std::swap(LHS, RHS); 3328 Pred = CmpInst::getSwappedPredicate(Pred); 3329 } 3330 3331 // Fold trivial predicates. 3332 Type *RetTy = GetCompareTy(LHS); 3333 if (Pred == FCmpInst::FCMP_FALSE) 3334 return getFalse(RetTy); 3335 if (Pred == FCmpInst::FCMP_TRUE) 3336 return getTrue(RetTy); 3337 3338 // UNO/ORD predicates can be trivially folded if NaNs are ignored. 
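  // For example, with the nnan flag set, "fcmp uno double %x, %y" folds to
  // false and "fcmp ord double %x, %y" folds to true (%x, %y illustrative).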
3339 if (FMF.noNaNs()) { 3340 if (Pred == FCmpInst::FCMP_UNO) 3341 return getFalse(RetTy); 3342 if (Pred == FCmpInst::FCMP_ORD) 3343 return getTrue(RetTy); 3344 } 3345 3346 // fcmp pred x, undef and fcmp pred undef, x 3347 // fold to true if unordered, false if ordered 3348 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) { 3349 // Choosing NaN for the undef will always make unordered comparison succeed 3350 // and ordered comparison fail. 3351 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred)); 3352 } 3353 3354 // fcmp x,x -> true/false. Not all compares are foldable. 3355 if (LHS == RHS) { 3356 if (CmpInst::isTrueWhenEqual(Pred)) 3357 return getTrue(RetTy); 3358 if (CmpInst::isFalseWhenEqual(Pred)) 3359 return getFalse(RetTy); 3360 } 3361 3362 // Handle fcmp with constant RHS 3363 const ConstantFP *CFP = nullptr; 3364 if (const auto *RHSC = dyn_cast<Constant>(RHS)) { 3365 if (RHS->getType()->isVectorTy()) 3366 CFP = dyn_cast_or_null<ConstantFP>(RHSC->getSplatValue()); 3367 else 3368 CFP = dyn_cast<ConstantFP>(RHSC); 3369 } 3370 if (CFP) { 3371 // If the constant is a nan, see if we can fold the comparison based on it. 3372 if (CFP->getValueAPF().isNaN()) { 3373 if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo" 3374 return getFalse(RetTy); 3375 assert(FCmpInst::isUnordered(Pred) && 3376 "Comparison must be either ordered or unordered!"); 3377 // True if unordered. 3378 return getTrue(RetTy); 3379 } 3380 // Check whether the constant is an infinity. 3381 if (CFP->getValueAPF().isInfinity()) { 3382 if (CFP->getValueAPF().isNegative()) { 3383 switch (Pred) { 3384 case FCmpInst::FCMP_OLT: 3385 // No value is ordered and less than negative infinity. 3386 return getFalse(RetTy); 3387 case FCmpInst::FCMP_UGE: 3388 // All values are unordered with or at least negative infinity. 3389 return getTrue(RetTy); 3390 default: 3391 break; 3392 } 3393 } else { 3394 switch (Pred) { 3395 case FCmpInst::FCMP_OGT: 3396 // No value is ordered and greater than infinity. 3397 return getFalse(RetTy); 3398 case FCmpInst::FCMP_ULE: 3399 // All values are unordered with and at most infinity. 3400 return getTrue(RetTy); 3401 default: 3402 break; 3403 } 3404 } 3405 } 3406 if (CFP->getValueAPF().isZero()) { 3407 switch (Pred) { 3408 case FCmpInst::FCMP_UGE: 3409 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3410 return getTrue(RetTy); 3411 break; 3412 case FCmpInst::FCMP_OLT: 3413 // X < 0 3414 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3415 return getFalse(RetTy); 3416 break; 3417 default: 3418 break; 3419 } 3420 } 3421 } 3422 3423 // If the comparison is with the result of a select instruction, check whether 3424 // comparing with either branch of the select always yields the same value. 3425 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3426 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3427 return V; 3428 3429 // If the comparison is with the result of a phi instruction, check whether 3430 // doing the compare with each incoming phi value yields a common result. 
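  // For example (illustrative): if %p is "phi double [ 1.0, ... ], [ 2.0, ... ]",
  // then "fcmp ogt double %p, 0.0" folds to true, since the compare is true for
  // every incoming value.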
3431 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 3432 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 3433 return V; 3434 3435 return nullptr; 3436 } 3437 3438 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3439 FastMathFlags FMF, const DataLayout &DL, 3440 const TargetLibraryInfo *TLI, 3441 const DominatorTree *DT, AssumptionCache *AC, 3442 const Instruction *CxtI) { 3443 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, 3444 Query(DL, TLI, DT, AC, CxtI), RecursionLimit); 3445 } 3446 3447 /// See if V simplifies when its operand Op is replaced with RepOp. 3448 static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, 3449 const Query &Q, 3450 unsigned MaxRecurse) { 3451 // Trivial replacement. 3452 if (V == Op) 3453 return RepOp; 3454 3455 auto *I = dyn_cast<Instruction>(V); 3456 if (!I) 3457 return nullptr; 3458 3459 // If this is a binary operator, try to simplify it with the replaced op. 3460 if (auto *B = dyn_cast<BinaryOperator>(I)) { 3461 // Consider: 3462 // %cmp = icmp eq i32 %x, 2147483647 3463 // %add = add nsw i32 %x, 1 3464 // %sel = select i1 %cmp, i32 -2147483648, i32 %add 3465 // 3466 // We can't replace %sel with %add unless we strip away the flags. 3467 if (isa<OverflowingBinaryOperator>(B)) 3468 if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap()) 3469 return nullptr; 3470 if (isa<PossiblyExactOperator>(B)) 3471 if (B->isExact()) 3472 return nullptr; 3473 3474 if (MaxRecurse) { 3475 if (B->getOperand(0) == Op) 3476 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q, 3477 MaxRecurse - 1); 3478 if (B->getOperand(1) == Op) 3479 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q, 3480 MaxRecurse - 1); 3481 } 3482 } 3483 3484 // Same for CmpInsts. 3485 if (CmpInst *C = dyn_cast<CmpInst>(I)) { 3486 if (MaxRecurse) { 3487 if (C->getOperand(0) == Op) 3488 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q, 3489 MaxRecurse - 1); 3490 if (C->getOperand(1) == Op) 3491 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q, 3492 MaxRecurse - 1); 3493 } 3494 } 3495 3496 // TODO: We could hand off more cases to instsimplify here. 3497 3498 // If all operands are constant after substituting Op for RepOp then we can 3499 // constant fold the instruction. 3500 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) { 3501 // Build a list of all constant operands. 3502 SmallVector<Constant *, 8> ConstOps; 3503 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 3504 if (I->getOperand(i) == Op) 3505 ConstOps.push_back(CRepOp); 3506 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i))) 3507 ConstOps.push_back(COp); 3508 else 3509 break; 3510 } 3511 3512 // All operands were constants, fold it. 3513 if (ConstOps.size() == I->getNumOperands()) { 3514 if (CmpInst *C = dyn_cast<CmpInst>(I)) 3515 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0], 3516 ConstOps[1], Q.DL, Q.TLI); 3517 3518 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 3519 if (!LI->isVolatile()) 3520 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL); 3521 3522 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI); 3523 } 3524 } 3525 3526 return nullptr; 3527 } 3528 3529 /// Try to simplify a select instruction when its condition operand is an 3530 /// integer comparison where one operand of the compare is a constant. 
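/// For example, "(X & 8) == 0 ? (X | 8) : X" simplifies to "X | 8", and
/// "(X & 8) != 0 ? (X | 8) : X" simplifies to "X" (values illustrative).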
3531 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, 3532 const APInt *Y, bool TrueWhenUnset) { 3533 const APInt *C; 3534 3535 // (X & Y) == 0 ? X & ~Y : X --> X 3536 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y 3537 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) && 3538 *Y == ~*C) 3539 return TrueWhenUnset ? FalseVal : TrueVal; 3540 3541 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y 3542 // (X & Y) != 0 ? X : X & ~Y --> X 3543 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) && 3544 *Y == ~*C) 3545 return TrueWhenUnset ? FalseVal : TrueVal; 3546 3547 if (Y->isPowerOf2()) { 3548 // (X & Y) == 0 ? X | Y : X --> X | Y 3549 // (X & Y) != 0 ? X | Y : X --> X 3550 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) && 3551 *Y == *C) 3552 return TrueWhenUnset ? TrueVal : FalseVal; 3553 3554 // (X & Y) == 0 ? X : X | Y --> X 3555 // (X & Y) != 0 ? X : X | Y --> X | Y 3556 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) && 3557 *Y == *C) 3558 return TrueWhenUnset ? TrueVal : FalseVal; 3559 } 3560 3561 return nullptr; 3562 } 3563 3564 /// An alternative way to test if a bit is set or not uses sgt/slt instead of 3565 /// eq/ne. 3566 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *TrueVal, 3567 Value *FalseVal, 3568 bool TrueWhenUnset) { 3569 unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits(); 3570 if (!BitWidth) 3571 return nullptr; 3572 3573 APInt MinSignedValue; 3574 Value *X; 3575 if (match(CmpLHS, m_Trunc(m_Value(X))) && (X == TrueVal || X == FalseVal)) { 3576 // icmp slt (trunc X), 0 <--> icmp ne (and X, C), 0 3577 // icmp sgt (trunc X), -1 <--> icmp eq (and X, C), 0 3578 unsigned DestSize = CmpLHS->getType()->getScalarSizeInBits(); 3579 MinSignedValue = APInt::getSignedMinValue(DestSize).zext(BitWidth); 3580 } else { 3581 // icmp slt X, 0 <--> icmp ne (and X, C), 0 3582 // icmp sgt X, -1 <--> icmp eq (and X, C), 0 3583 X = CmpLHS; 3584 MinSignedValue = APInt::getSignedMinValue(BitWidth); 3585 } 3586 3587 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, &MinSignedValue, 3588 TrueWhenUnset)) 3589 return V; 3590 3591 return nullptr; 3592 } 3593 3594 /// Try to simplify a select instruction when its condition operand is an 3595 /// integer comparison. 3596 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, 3597 Value *FalseVal, const Query &Q, 3598 unsigned MaxRecurse) { 3599 ICmpInst::Predicate Pred; 3600 Value *CmpLHS, *CmpRHS; 3601 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)))) 3602 return nullptr; 3603 3604 // FIXME: This code is nearly duplicated in InstCombine. Using/refactoring 3605 // decomposeBitTestICmp() might help. 3606 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) { 3607 Value *X; 3608 const APInt *Y; 3609 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y)))) 3610 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y, 3611 Pred == ICmpInst::ICMP_EQ)) 3612 return V; 3613 } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) { 3614 // Comparing signed-less-than 0 checks if the sign bit is set. 3615 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal, 3616 false)) 3617 return V; 3618 } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) { 3619 // Comparing signed-greater-than -1 checks if the sign bit is not set. 
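    // For example, "icmp sgt i32 %x, -1" is the same test as
    // "icmp eq (and i32 %x, 0x80000000), 0" (%x illustrative).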
3620 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal, 3621 true)) 3622 return V; 3623 } 3624 3625 if (CondVal->hasOneUse()) { 3626 const APInt *C; 3627 if (match(CmpRHS, m_APInt(C))) { 3628 // X < MIN ? T : F --> F 3629 if (Pred == ICmpInst::ICMP_SLT && C->isMinSignedValue()) 3630 return FalseVal; 3631 // X < MIN ? T : F --> F 3632 if (Pred == ICmpInst::ICMP_ULT && C->isMinValue()) 3633 return FalseVal; 3634 // X > MAX ? T : F --> F 3635 if (Pred == ICmpInst::ICMP_SGT && C->isMaxSignedValue()) 3636 return FalseVal; 3637 // X > MAX ? T : F --> F 3638 if (Pred == ICmpInst::ICMP_UGT && C->isMaxValue()) 3639 return FalseVal; 3640 } 3641 } 3642 3643 // If we have an equality comparison, then we know the value in one of the 3644 // arms of the select. See if substituting this value into the arm and 3645 // simplifying the result yields the same value as the other arm. 3646 if (Pred == ICmpInst::ICMP_EQ) { 3647 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3648 TrueVal || 3649 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3650 TrueVal) 3651 return FalseVal; 3652 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3653 FalseVal || 3654 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3655 FalseVal) 3656 return FalseVal; 3657 } else if (Pred == ICmpInst::ICMP_NE) { 3658 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3659 FalseVal || 3660 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3661 FalseVal) 3662 return TrueVal; 3663 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3664 TrueVal || 3665 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3666 TrueVal) 3667 return TrueVal; 3668 } 3669 3670 return nullptr; 3671 } 3672 3673 /// Given operands for a SelectInst, see if we can fold the result. 3674 /// If not, this returns null. 3675 static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal, 3676 Value *FalseVal, const Query &Q, 3677 unsigned MaxRecurse) { 3678 // select true, X, Y -> X 3679 // select false, X, Y -> Y 3680 if (Constant *CB = dyn_cast<Constant>(CondVal)) { 3681 if (CB->isAllOnesValue()) 3682 return TrueVal; 3683 if (CB->isNullValue()) 3684 return FalseVal; 3685 } 3686 3687 // select C, X, X -> X 3688 if (TrueVal == FalseVal) 3689 return TrueVal; 3690 3691 if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y 3692 if (isa<Constant>(TrueVal)) 3693 return TrueVal; 3694 return FalseVal; 3695 } 3696 if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X 3697 return FalseVal; 3698 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X 3699 return TrueVal; 3700 3701 if (Value *V = 3702 simplifySelectWithICmpCond(CondVal, TrueVal, FalseVal, Q, MaxRecurse)) 3703 return V; 3704 3705 return nullptr; 3706 } 3707 3708 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, 3709 const DataLayout &DL, 3710 const TargetLibraryInfo *TLI, 3711 const DominatorTree *DT, AssumptionCache *AC, 3712 const Instruction *CxtI) { 3713 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, 3714 Query(DL, TLI, DT, AC, CxtI), RecursionLimit); 3715 } 3716 3717 /// Given operands for an GetElementPtrInst, see if we can fold the result. 3718 /// If not, this returns null. 3719 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 3720 const Query &Q, unsigned) { 3721 // The type of the GEP pointer operand. 
3722 unsigned AS = 3723 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace(); 3724 3725 // getelementptr P -> P. 3726 if (Ops.size() == 1) 3727 return Ops[0]; 3728 3729 // Compute the (pointer) type returned by the GEP instruction. 3730 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1)); 3731 Type *GEPTy = PointerType::get(LastType, AS); 3732 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType())) 3733 GEPTy = VectorType::get(GEPTy, VT->getNumElements()); 3734 3735 if (isa<UndefValue>(Ops[0])) 3736 return UndefValue::get(GEPTy); 3737 3738 if (Ops.size() == 2) { 3739 // getelementptr P, 0 -> P. 3740 if (match(Ops[1], m_Zero())) 3741 return Ops[0]; 3742 3743 Type *Ty = SrcTy; 3744 if (Ty->isSized()) { 3745 Value *P; 3746 uint64_t C; 3747 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty); 3748 // getelementptr P, N -> P if P points to a type of zero size. 3749 if (TyAllocSize == 0) 3750 return Ops[0]; 3751 3752 // The following transforms are only safe if the ptrtoint cast 3753 // doesn't truncate the pointers. 3754 if (Ops[1]->getType()->getScalarSizeInBits() == 3755 Q.DL.getPointerSizeInBits(AS)) { 3756 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * { 3757 if (match(P, m_Zero())) 3758 return Constant::getNullValue(GEPTy); 3759 Value *Temp; 3760 if (match(P, m_PtrToInt(m_Value(Temp)))) 3761 if (Temp->getType() == GEPTy) 3762 return Temp; 3763 return nullptr; 3764 }; 3765 3766 // getelementptr V, (sub P, V) -> P if P points to a type of size 1. 3767 if (TyAllocSize == 1 && 3768 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))))) 3769 if (Value *R = PtrToIntOrZero(P)) 3770 return R; 3771 3772 // getelementptr V, (ashr (sub P, V), C) -> Q 3773 // if P points to a type of size 1 << C. 3774 if (match(Ops[1], 3775 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 3776 m_ConstantInt(C))) && 3777 TyAllocSize == 1ULL << C) 3778 if (Value *R = PtrToIntOrZero(P)) 3779 return R; 3780 3781 // getelementptr V, (sdiv (sub P, V), C) -> Q 3782 // if P points to a type of size C. 3783 if (match(Ops[1], 3784 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 3785 m_SpecificInt(TyAllocSize)))) 3786 if (Value *R = PtrToIntOrZero(P)) 3787 return R; 3788 } 3789 } 3790 } 3791 3792 if (Q.DL.getTypeAllocSize(LastType) == 1 && 3793 all_of(Ops.slice(1).drop_back(1), 3794 [](Value *Idx) { return match(Idx, m_Zero()); })) { 3795 unsigned PtrWidth = 3796 Q.DL.getPointerSizeInBits(Ops[0]->getType()->getPointerAddressSpace()); 3797 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == PtrWidth) { 3798 APInt BasePtrOffset(PtrWidth, 0); 3799 Value *StrippedBasePtr = 3800 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL, 3801 BasePtrOffset); 3802 3803 // gep (gep V, C), (sub 0, V) -> C 3804 if (match(Ops.back(), 3805 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) { 3806 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset); 3807 return ConstantExpr::getIntToPtr(CI, GEPTy); 3808 } 3809 // gep (gep V, C), (xor V, -1) -> C-1 3810 if (match(Ops.back(), 3811 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) { 3812 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1); 3813 return ConstantExpr::getIntToPtr(CI, GEPTy); 3814 } 3815 } 3816 } 3817 3818 // Check to see if this is constant foldable. 
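  // For example, "getelementptr [4 x i32], [4 x i32]* @g, i64 0, i64 2" has only
  // constant operands and folds to a constant expression GEP (@g illustrative).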
3819 for (unsigned i = 0, e = Ops.size(); i != e; ++i) 3820 if (!isa<Constant>(Ops[i])) 3821 return nullptr; 3822 3823 return ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]), 3824 Ops.slice(1)); 3825 } 3826 3827 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 3828 const DataLayout &DL, 3829 const TargetLibraryInfo *TLI, 3830 const DominatorTree *DT, AssumptionCache *AC, 3831 const Instruction *CxtI) { 3832 return ::SimplifyGEPInst(SrcTy, Ops, 3833 Query(DL, TLI, DT, AC, CxtI), RecursionLimit); 3834 } 3835 3836 /// Given operands for an InsertValueInst, see if we can fold the result. 3837 /// If not, this returns null. 3838 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val, 3839 ArrayRef<unsigned> Idxs, const Query &Q, 3840 unsigned) { 3841 if (Constant *CAgg = dyn_cast<Constant>(Agg)) 3842 if (Constant *CVal = dyn_cast<Constant>(Val)) 3843 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs); 3844 3845 // insertvalue x, undef, n -> x 3846 if (match(Val, m_Undef())) 3847 return Agg; 3848 3849 // insertvalue x, (extractvalue y, n), n 3850 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val)) 3851 if (EV->getAggregateOperand()->getType() == Agg->getType() && 3852 EV->getIndices() == Idxs) { 3853 // insertvalue undef, (extractvalue y, n), n -> y 3854 if (match(Agg, m_Undef())) 3855 return EV->getAggregateOperand(); 3856 3857 // insertvalue y, (extractvalue y, n), n -> y 3858 if (Agg == EV->getAggregateOperand()) 3859 return Agg; 3860 } 3861 3862 return nullptr; 3863 } 3864 3865 Value *llvm::SimplifyInsertValueInst( 3866 Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout &DL, 3867 const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, 3868 const Instruction *CxtI) { 3869 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query(DL, TLI, DT, AC, CxtI), 3870 RecursionLimit); 3871 } 3872 3873 /// Given operands for an ExtractValueInst, see if we can fold the result. 3874 /// If not, this returns null. 3875 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 3876 const Query &, unsigned) { 3877 if (auto *CAgg = dyn_cast<Constant>(Agg)) 3878 return ConstantFoldExtractValueInstruction(CAgg, Idxs); 3879 3880 // extractvalue x, (insertvalue y, elt, n), n -> elt 3881 unsigned NumIdxs = Idxs.size(); 3882 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr; 3883 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) { 3884 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices(); 3885 unsigned NumInsertValueIdxs = InsertValueIdxs.size(); 3886 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs); 3887 if (InsertValueIdxs.slice(0, NumCommonIdxs) == 3888 Idxs.slice(0, NumCommonIdxs)) { 3889 if (NumIdxs == NumInsertValueIdxs) 3890 return IVI->getInsertedValueOperand(); 3891 break; 3892 } 3893 } 3894 3895 return nullptr; 3896 } 3897 3898 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 3899 const DataLayout &DL, 3900 const TargetLibraryInfo *TLI, 3901 const DominatorTree *DT, 3902 AssumptionCache *AC, 3903 const Instruction *CxtI) { 3904 return ::SimplifyExtractValueInst(Agg, Idxs, Query(DL, TLI, DT, AC, CxtI), 3905 RecursionLimit); 3906 } 3907 3908 /// Given operands for an ExtractElementInst, see if we can fold the result. 3909 /// If not, this returns null. 
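/// For example, extracting any lane of a constant splat vector yields the splat
/// value, and extracting lane N of an insertelement chain that wrote lane N
/// yields the inserted scalar (via findScalarElement).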
3910 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const Query &, 3911 unsigned) { 3912 if (auto *CVec = dyn_cast<Constant>(Vec)) { 3913 if (auto *CIdx = dyn_cast<Constant>(Idx)) 3914 return ConstantFoldExtractElementInstruction(CVec, CIdx); 3915 3916 // The index is not relevant if our vector is a splat. 3917 if (auto *Splat = CVec->getSplatValue()) 3918 return Splat; 3919 3920 if (isa<UndefValue>(Vec)) 3921 return UndefValue::get(Vec->getType()->getVectorElementType()); 3922 } 3923 3924 // If extracting a specified index from the vector, see if we can recursively 3925 // find a previously computed scalar that was inserted into the vector. 3926 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) 3927 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue())) 3928 return Elt; 3929 3930 return nullptr; 3931 } 3932 3933 Value *llvm::SimplifyExtractElementInst( 3934 Value *Vec, Value *Idx, const DataLayout &DL, const TargetLibraryInfo *TLI, 3935 const DominatorTree *DT, AssumptionCache *AC, const Instruction *CxtI) { 3936 return ::SimplifyExtractElementInst(Vec, Idx, Query(DL, TLI, DT, AC, CxtI), 3937 RecursionLimit); 3938 } 3939 3940 /// See if we can fold the given phi. If not, returns null. 3941 static Value *SimplifyPHINode(PHINode *PN, const Query &Q) { 3942 // If all of the PHI's incoming values are the same then replace the PHI node 3943 // with the common value. 3944 Value *CommonValue = nullptr; 3945 bool HasUndefInput = false; 3946 for (Value *Incoming : PN->incoming_values()) { 3947 // If the incoming value is the phi node itself, it can safely be skipped. 3948 if (Incoming == PN) continue; 3949 if (isa<UndefValue>(Incoming)) { 3950 // Remember that we saw an undef value, but otherwise ignore them. 3951 HasUndefInput = true; 3952 continue; 3953 } 3954 if (CommonValue && Incoming != CommonValue) 3955 return nullptr; // Not the same, bail out. 3956 CommonValue = Incoming; 3957 } 3958 3959 // If CommonValue is null then all of the incoming values were either undef or 3960 // equal to the phi node itself. 3961 if (!CommonValue) 3962 return UndefValue::get(PN->getType()); 3963 3964 // If we have a PHI node like phi(X, undef, X), where X is defined by some 3965 // instruction, we cannot return X as the result of the PHI node unless it 3966 // dominates the PHI block. 3967 if (HasUndefInput) 3968 return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr; 3969 3970 return CommonValue; 3971 } 3972 3973 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op, 3974 Type *Ty, const Query &Q, unsigned MaxRecurse) { 3975 if (auto *C = dyn_cast<Constant>(Op)) 3976 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL); 3977 3978 if (auto *CI = dyn_cast<CastInst>(Op)) { 3979 auto *Src = CI->getOperand(0); 3980 Type *SrcTy = Src->getType(); 3981 Type *MidTy = CI->getType(); 3982 Type *DstTy = Ty; 3983 if (Src->getType() == Ty) { 3984 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode()); 3985 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc); 3986 Type *SrcIntPtrTy = 3987 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr; 3988 Type *MidIntPtrTy = 3989 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr; 3990 Type *DstIntPtrTy = 3991 DstTy->isPtrOrPtrVectorTy() ? 
Q.DL.getIntPtrType(DstTy) : nullptr; 3992 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy, 3993 SrcIntPtrTy, MidIntPtrTy, 3994 DstIntPtrTy) == Instruction::BitCast) 3995 return Src; 3996 } 3997 } 3998 3999 // bitcast x -> x 4000 if (CastOpc == Instruction::BitCast) 4001 if (Op->getType() == Ty) 4002 return Op; 4003 4004 return nullptr; 4005 } 4006 4007 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, 4008 const DataLayout &DL, 4009 const TargetLibraryInfo *TLI, 4010 const DominatorTree *DT, AssumptionCache *AC, 4011 const Instruction *CxtI) { 4012 return ::SimplifyCastInst(CastOpc, Op, Ty, Query(DL, TLI, DT, AC, CxtI), 4013 RecursionLimit); 4014 } 4015 4016 //=== Helper functions for higher up the class hierarchy. 4017 4018 /// Given operands for a BinaryOperator, see if we can fold the result. 4019 /// If not, this returns null. 4020 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4021 const Query &Q, unsigned MaxRecurse) { 4022 switch (Opcode) { 4023 case Instruction::Add: 4024 return SimplifyAddInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false, 4025 Q, MaxRecurse); 4026 case Instruction::FAdd: 4027 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4028 4029 case Instruction::Sub: 4030 return SimplifySubInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false, 4031 Q, MaxRecurse); 4032 case Instruction::FSub: 4033 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4034 4035 case Instruction::Mul: return SimplifyMulInst (LHS, RHS, Q, MaxRecurse); 4036 case Instruction::FMul: 4037 return SimplifyFMulInst (LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4038 case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, Q, MaxRecurse); 4039 case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse); 4040 case Instruction::FDiv: 4041 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4042 case Instruction::SRem: return SimplifySRemInst(LHS, RHS, Q, MaxRecurse); 4043 case Instruction::URem: return SimplifyURemInst(LHS, RHS, Q, MaxRecurse); 4044 case Instruction::FRem: 4045 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4046 case Instruction::Shl: 4047 return SimplifyShlInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false, 4048 Q, MaxRecurse); 4049 case Instruction::LShr: 4050 return SimplifyLShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse); 4051 case Instruction::AShr: 4052 return SimplifyAShrInst(LHS, RHS, /*isExact*/false, Q, MaxRecurse); 4053 case Instruction::And: return SimplifyAndInst(LHS, RHS, Q, MaxRecurse); 4054 case Instruction::Or: return SimplifyOrInst (LHS, RHS, Q, MaxRecurse); 4055 case Instruction::Xor: return SimplifyXorInst(LHS, RHS, Q, MaxRecurse); 4056 default: 4057 if (Constant *CLHS = dyn_cast<Constant>(LHS)) 4058 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 4059 return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL); 4060 4061 // If the operation is associative, try some generic simplifications. 4062 if (Instruction::isAssociative(Opcode)) 4063 if (Value *V = SimplifyAssociativeBinOp(Opcode, LHS, RHS, Q, MaxRecurse)) 4064 return V; 4065 4066 // If the operation is with the result of a select instruction check whether 4067 // operating on either branch of the select always yields the same value. 
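    // For example, if both "A op RHS" and "B op RHS" fold to the same value V,
    // then "(select %c, A, B) op RHS" can simply be replaced by V.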
4068 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 4069 if (Value *V = ThreadBinOpOverSelect(Opcode, LHS, RHS, Q, MaxRecurse)) 4070 return V; 4071 4072 // If the operation is with the result of a phi instruction, check whether 4073 // operating on all incoming values of the phi always yields the same value. 4074 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 4075 if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, Q, MaxRecurse)) 4076 return V; 4077 4078 return nullptr; 4079 } 4080 } 4081 4082 /// Given operands for a BinaryOperator, see if we can fold the result. 4083 /// If not, this returns null. 4084 /// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the 4085 /// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp. 4086 static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4087 const FastMathFlags &FMF, const Query &Q, 4088 unsigned MaxRecurse) { 4089 switch (Opcode) { 4090 case Instruction::FAdd: 4091 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse); 4092 case Instruction::FSub: 4093 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse); 4094 case Instruction::FMul: 4095 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse); 4096 default: 4097 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse); 4098 } 4099 } 4100 4101 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4102 const DataLayout &DL, const TargetLibraryInfo *TLI, 4103 const DominatorTree *DT, AssumptionCache *AC, 4104 const Instruction *CxtI) { 4105 return ::SimplifyBinOp(Opcode, LHS, RHS, Query(DL, TLI, DT, AC, CxtI), 4106 RecursionLimit); 4107 } 4108 4109 Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4110 const FastMathFlags &FMF, const DataLayout &DL, 4111 const TargetLibraryInfo *TLI, 4112 const DominatorTree *DT, AssumptionCache *AC, 4113 const Instruction *CxtI) { 4114 return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Query(DL, TLI, DT, AC, CxtI), 4115 RecursionLimit); 4116 } 4117 4118 /// Given operands for a CmpInst, see if we can fold the result. 
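/// If not, this returns null. Integer predicates are dispatched to
/// SimplifyICmpInst and floating-point predicates to SimplifyFCmpInst.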
4119 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4120 const Query &Q, unsigned MaxRecurse) { 4121 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate)) 4122 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse); 4123 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4124 } 4125 4126 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4127 const DataLayout &DL, const TargetLibraryInfo *TLI, 4128 const DominatorTree *DT, AssumptionCache *AC, 4129 const Instruction *CxtI) { 4130 return ::SimplifyCmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI), 4131 RecursionLimit); 4132 } 4133 4134 static bool IsIdempotent(Intrinsic::ID ID) { 4135 switch (ID) { 4136 default: return false; 4137 4138 // Unary idempotent: f(f(x)) = f(x) 4139 case Intrinsic::fabs: 4140 case Intrinsic::floor: 4141 case Intrinsic::ceil: 4142 case Intrinsic::trunc: 4143 case Intrinsic::rint: 4144 case Intrinsic::nearbyint: 4145 case Intrinsic::round: 4146 return true; 4147 } 4148 } 4149 4150 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset, 4151 const DataLayout &DL) { 4152 GlobalValue *PtrSym; 4153 APInt PtrOffset; 4154 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL)) 4155 return nullptr; 4156 4157 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext()); 4158 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext()); 4159 Type *Int32PtrTy = Int32Ty->getPointerTo(); 4160 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext()); 4161 4162 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset); 4163 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64) 4164 return nullptr; 4165 4166 uint64_t OffsetInt = OffsetConstInt->getSExtValue(); 4167 if (OffsetInt % 4 != 0) 4168 return nullptr; 4169 4170 Constant *C = ConstantExpr::getGetElementPtr( 4171 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy), 4172 ConstantInt::get(Int64Ty, OffsetInt / 4)); 4173 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL); 4174 if (!Loaded) 4175 return nullptr; 4176 4177 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded); 4178 if (!LoadedCE) 4179 return nullptr; 4180 4181 if (LoadedCE->getOpcode() == Instruction::Trunc) { 4182 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 4183 if (!LoadedCE) 4184 return nullptr; 4185 } 4186 4187 if (LoadedCE->getOpcode() != Instruction::Sub) 4188 return nullptr; 4189 4190 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 4191 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt) 4192 return nullptr; 4193 auto *LoadedLHSPtr = LoadedLHS->getOperand(0); 4194 4195 Constant *LoadedRHS = LoadedCE->getOperand(1); 4196 GlobalValue *LoadedRHSSym; 4197 APInt LoadedRHSOffset; 4198 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset, 4199 DL) || 4200 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset) 4201 return nullptr; 4202 4203 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy); 4204 } 4205 4206 static bool maskIsAllZeroOrUndef(Value *Mask) { 4207 auto *ConstMask = dyn_cast<Constant>(Mask); 4208 if (!ConstMask) 4209 return false; 4210 if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask)) 4211 return true; 4212 for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E; 4213 ++I) { 4214 if (auto *MaskElt = ConstMask->getAggregateElement(I)) 4215 if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt)) 4216 continue; 4217 return false; 4218 } 4219 return true; 4220 } 4221 4222 template <typename 
IterTy> 4223 static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd, 4224 const Query &Q, unsigned MaxRecurse) { 4225 Intrinsic::ID IID = F->getIntrinsicID(); 4226 unsigned NumOperands = std::distance(ArgBegin, ArgEnd); 4227 Type *ReturnType = F->getReturnType(); 4228 4229 // Binary Ops 4230 if (NumOperands == 2) { 4231 Value *LHS = *ArgBegin; 4232 Value *RHS = *(ArgBegin + 1); 4233 if (IID == Intrinsic::usub_with_overflow || 4234 IID == Intrinsic::ssub_with_overflow) { 4235 // X - X -> { 0, false } 4236 if (LHS == RHS) 4237 return Constant::getNullValue(ReturnType); 4238 4239 // X - undef -> undef 4240 // undef - X -> undef 4241 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) 4242 return UndefValue::get(ReturnType); 4243 } 4244 4245 if (IID == Intrinsic::uadd_with_overflow || 4246 IID == Intrinsic::sadd_with_overflow) { 4247 // X + undef -> undef 4248 if (isa<UndefValue>(RHS)) 4249 return UndefValue::get(ReturnType); 4250 } 4251 4252 if (IID == Intrinsic::umul_with_overflow || 4253 IID == Intrinsic::smul_with_overflow) { 4254 // X * 0 -> { 0, false } 4255 if (match(RHS, m_Zero())) 4256 return Constant::getNullValue(ReturnType); 4257 4258 // X * undef -> { 0, false } 4259 if (match(RHS, m_Undef())) 4260 return Constant::getNullValue(ReturnType); 4261 } 4262 4263 if (IID == Intrinsic::load_relative && isa<Constant>(LHS) && 4264 isa<Constant>(RHS)) 4265 return SimplifyRelativeLoad(cast<Constant>(LHS), cast<Constant>(RHS), 4266 Q.DL); 4267 } 4268 4269 // Simplify calls to llvm.masked.load.* 4270 if (IID == Intrinsic::masked_load) { 4271 Value *MaskArg = ArgBegin[2]; 4272 Value *PassthruArg = ArgBegin[3]; 4273 // If the mask is all zeros or undef, the "passthru" argument is the result. 4274 if (maskIsAllZeroOrUndef(MaskArg)) 4275 return PassthruArg; 4276 } 4277 4278 // Perform idempotent optimizations 4279 if (!IsIdempotent(IID)) 4280 return nullptr; 4281 4282 // Unary Ops 4283 if (NumOperands == 1) 4284 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin)) 4285 if (II->getIntrinsicID() == IID) 4286 return II; 4287 4288 return nullptr; 4289 } 4290 4291 template <typename IterTy> 4292 static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd, 4293 const Query &Q, unsigned MaxRecurse) { 4294 Type *Ty = V->getType(); 4295 if (PointerType *PTy = dyn_cast<PointerType>(Ty)) 4296 Ty = PTy->getElementType(); 4297 FunctionType *FTy = cast<FunctionType>(Ty); 4298 4299 // call undef -> undef 4300 // call null -> undef 4301 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V)) 4302 return UndefValue::get(FTy->getReturnType()); 4303 4304 Function *F = dyn_cast<Function>(V); 4305 if (!F) 4306 return nullptr; 4307 4308 if (F->isIntrinsic()) 4309 if (Value *Ret = SimplifyIntrinsic(F, ArgBegin, ArgEnd, Q, MaxRecurse)) 4310 return Ret; 4311 4312 if (!canConstantFoldCallTo(F)) 4313 return nullptr; 4314 4315 SmallVector<Constant *, 4> ConstantArgs; 4316 ConstantArgs.reserve(ArgEnd - ArgBegin); 4317 for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) { 4318 Constant *C = dyn_cast<Constant>(*I); 4319 if (!C) 4320 return nullptr; 4321 ConstantArgs.push_back(C); 4322 } 4323 4324 return ConstantFoldCall(F, ConstantArgs, Q.TLI); 4325 } 4326 4327 Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin, 4328 User::op_iterator ArgEnd, const DataLayout &DL, 4329 const TargetLibraryInfo *TLI, const DominatorTree *DT, 4330 AssumptionCache *AC, const Instruction *CxtI) { 4331 return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT, AC, CxtI), 4332 RecursionLimit); 4333 } 4334 
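/// Overload of SimplifyCall that takes the call arguments as an ArrayRef rather
/// than an iterator range; it forwards to the same implementation.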
4335 Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args, 4336 const DataLayout &DL, const TargetLibraryInfo *TLI, 4337 const DominatorTree *DT, AssumptionCache *AC, 4338 const Instruction *CxtI) { 4339 return ::SimplifyCall(V, Args.begin(), Args.end(), 4340 Query(DL, TLI, DT, AC, CxtI), RecursionLimit); 4341 } 4342 4343 /// See if we can compute a simplified version of this instruction. 4344 /// If not, this returns null. 4345 Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL, 4346 const TargetLibraryInfo *TLI, 4347 const DominatorTree *DT, AssumptionCache *AC) { 4348 Value *Result; 4349 4350 switch (I->getOpcode()) { 4351 default: 4352 Result = ConstantFoldInstruction(I, DL, TLI); 4353 break; 4354 case Instruction::FAdd: 4355 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1), 4356 I->getFastMathFlags(), DL, TLI, DT, AC, I); 4357 break; 4358 case Instruction::Add: 4359 Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1), 4360 cast<BinaryOperator>(I)->hasNoSignedWrap(), 4361 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL, 4362 TLI, DT, AC, I); 4363 break; 4364 case Instruction::FSub: 4365 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1), 4366 I->getFastMathFlags(), DL, TLI, DT, AC, I); 4367 break; 4368 case Instruction::Sub: 4369 Result = SimplifySubInst(I->getOperand(0), I->getOperand(1), 4370 cast<BinaryOperator>(I)->hasNoSignedWrap(), 4371 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL, 4372 TLI, DT, AC, I); 4373 break; 4374 case Instruction::FMul: 4375 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1), 4376 I->getFastMathFlags(), DL, TLI, DT, AC, I); 4377 break; 4378 case Instruction::Mul: 4379 Result = 4380 SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I); 4381 break; 4382 case Instruction::SDiv: 4383 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, 4384 AC, I); 4385 break; 4386 case Instruction::UDiv: 4387 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, 4388 AC, I); 4389 break; 4390 case Instruction::FDiv: 4391 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), 4392 I->getFastMathFlags(), DL, TLI, DT, AC, I); 4393 break; 4394 case Instruction::SRem: 4395 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, 4396 AC, I); 4397 break; 4398 case Instruction::URem: 4399 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, 4400 AC, I); 4401 break; 4402 case Instruction::FRem: 4403 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), 4404 I->getFastMathFlags(), DL, TLI, DT, AC, I); 4405 break; 4406 case Instruction::Shl: 4407 Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1), 4408 cast<BinaryOperator>(I)->hasNoSignedWrap(), 4409 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), DL, 4410 TLI, DT, AC, I); 4411 break; 4412 case Instruction::LShr: 4413 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1), 4414 cast<BinaryOperator>(I)->isExact(), DL, TLI, DT, 4415 AC, I); 4416 break; 4417 case Instruction::AShr: 4418 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1), 4419 cast<BinaryOperator>(I)->isExact(), DL, TLI, DT, 4420 AC, I); 4421 break; 4422 case Instruction::And: 4423 Result = 4424 SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I); 4425 break; 4426 case Instruction::Or: 4427 Result = 4428 SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I); 4429 break; 4430 case Instruction::Xor: 4431 Result = 4432 
SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT, AC, I); 4433 break; 4434 case Instruction::ICmp: 4435 Result = 4436 SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), I->getOperand(0), 4437 I->getOperand(1), DL, TLI, DT, AC, I); 4438 break; 4439 case Instruction::FCmp: 4440 Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), 4441 I->getOperand(0), I->getOperand(1), 4442 I->getFastMathFlags(), DL, TLI, DT, AC, I); 4443 break; 4444 case Instruction::Select: 4445 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1), 4446 I->getOperand(2), DL, TLI, DT, AC, I); 4447 break; 4448 case Instruction::GetElementPtr: { 4449 SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end()); 4450 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(), 4451 Ops, DL, TLI, DT, AC, I); 4452 break; 4453 } 4454 case Instruction::InsertValue: { 4455 InsertValueInst *IV = cast<InsertValueInst>(I); 4456 Result = SimplifyInsertValueInst(IV->getAggregateOperand(), 4457 IV->getInsertedValueOperand(), 4458 IV->getIndices(), DL, TLI, DT, AC, I); 4459 break; 4460 } 4461 case Instruction::ExtractValue: { 4462 auto *EVI = cast<ExtractValueInst>(I); 4463 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(), 4464 EVI->getIndices(), DL, TLI, DT, AC, I); 4465 break; 4466 } 4467 case Instruction::ExtractElement: { 4468 auto *EEI = cast<ExtractElementInst>(I); 4469 Result = SimplifyExtractElementInst( 4470 EEI->getVectorOperand(), EEI->getIndexOperand(), DL, TLI, DT, AC, I); 4471 break; 4472 } 4473 case Instruction::PHI: 4474 Result = SimplifyPHINode(cast<PHINode>(I), Query(DL, TLI, DT, AC, I)); 4475 break; 4476 case Instruction::Call: { 4477 CallSite CS(cast<CallInst>(I)); 4478 Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(), DL, 4479 TLI, DT, AC, I); 4480 break; 4481 } 4482 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc: 4483 #include "llvm/IR/Instruction.def" 4484 #undef HANDLE_CAST_INST 4485 Result = SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), 4486 DL, TLI, DT, AC, I); 4487 break; 4488 } 4489 4490 // In general, it is possible for computeKnownBits to determine all bits in a 4491 // value even when the operands are not all constants. 4492 if (!Result && I->getType()->isIntOrIntVectorTy()) { 4493 unsigned BitWidth = I->getType()->getScalarSizeInBits(); 4494 APInt KnownZero(BitWidth, 0); 4495 APInt KnownOne(BitWidth, 0); 4496 computeKnownBits(I, KnownZero, KnownOne, DL, /*Depth*/0, AC, I, DT); 4497 if ((KnownZero | KnownOne).isAllOnesValue()) 4498 Result = ConstantInt::get(I->getType(), KnownOne); 4499 } 4500 4501 /// If called on unreachable code, the above logic may report that the 4502 /// instruction simplified to itself. Make life easier for users by 4503 /// detecting that case here, returning a safe value instead. 4504 return Result == I ? UndefValue::get(I->getType()) : Result; 4505 } 4506 4507 /// \brief Implementation of recursive simplification through an instruction's 4508 /// uses. 4509 /// 4510 /// This is the common implementation of the recursive simplification routines. 4511 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to 4512 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of 4513 /// instructions to process and attempt to simplify it using 4514 /// InstructionSimplify. 4515 /// 4516 /// This routine returns 'true' only when *it* simplifies something. The passed 4517 /// in simplified value does not count toward this. 
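/// For example (illustrative), if %a is replaced by %b and this lets a user such
/// as "%c = sub i32 %a, %b" simplify to zero, that user is rewritten as well and
/// its own users are revisited on the same worklist.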
4518 static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, 4519 const TargetLibraryInfo *TLI, 4520 const DominatorTree *DT, 4521 AssumptionCache *AC) { 4522 bool Simplified = false; 4523 SmallSetVector<Instruction *, 8> Worklist; 4524 const DataLayout &DL = I->getModule()->getDataLayout(); 4525 4526 // If we have an explicit value to collapse to, do that round of the 4527 // simplification loop by hand initially. 4528 if (SimpleV) { 4529 for (User *U : I->users()) 4530 if (U != I) 4531 Worklist.insert(cast<Instruction>(U)); 4532 4533 // Replace the instruction with its simplified value. 4534 I->replaceAllUsesWith(SimpleV); 4535 4536 // Gracefully handle edge cases where the instruction is not wired into any 4537 // parent block. 4538 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) && 4539 !I->mayHaveSideEffects()) 4540 I->eraseFromParent(); 4541 } else { 4542 Worklist.insert(I); 4543 } 4544 4545 // Note that we must test the size on each iteration, the worklist can grow. 4546 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) { 4547 I = Worklist[Idx]; 4548 4549 // See if this instruction simplifies. 4550 SimpleV = SimplifyInstruction(I, DL, TLI, DT, AC); 4551 if (!SimpleV) 4552 continue; 4553 4554 Simplified = true; 4555 4556 // Stash away all the uses of the old instruction so we can check them for 4557 // recursive simplifications after a RAUW. This is cheaper than checking all 4558 // uses of To on the recursive step in most cases. 4559 for (User *U : I->users()) 4560 Worklist.insert(cast<Instruction>(U)); 4561 4562 // Replace the instruction with its simplified value. 4563 I->replaceAllUsesWith(SimpleV); 4564 4565 // Gracefully handle edge cases where the instruction is not wired into any 4566 // parent block. 4567 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) && 4568 !I->mayHaveSideEffects()) 4569 I->eraseFromParent(); 4570 } 4571 return Simplified; 4572 } 4573 4574 bool llvm::recursivelySimplifyInstruction(Instruction *I, 4575 const TargetLibraryInfo *TLI, 4576 const DominatorTree *DT, 4577 AssumptionCache *AC) { 4578 return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC); 4579 } 4580 4581 bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, 4582 const TargetLibraryInfo *TLI, 4583 const DominatorTree *DT, 4584 AssumptionCache *AC) { 4585 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!"); 4586 assert(SimpleV && "Must provide a simplified value."); 4587 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC); 4588 } 4589