//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: this is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                              const SimplifyQuery &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
                               const SimplifyQuery &, unsigned);

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) {
  return ConstantInt::getFalse(Ty);
}

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) {
  return ConstantInt::getTrue(Ty);
}

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}
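// For example:
//   %c = icmp slt i32 %a, %b
// is the same compare as "sgt %b, %a": the predicate is the swapped form
// and the operands are exchanged, so isSameCompare(%c, SGT, %b, %a) is true.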
/// Does the given value dominate the specified phi node?
static bool ValueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getParent()->getParent())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT) {
    if (!DT->isReachableFromEntry(P->getParent()))
      return true;
    if (!DT->isReachableFromEntry(I->getParent()))
      return false;
    return DT->dominates(I, P);
  }

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent() == &I->getParent()->getParent()->getEntryBlock() &&
      !isa<InvokeInst>(I))
    return true;

  return false;
}

/// Simplify "A op (B op' C)" by distributing op over op', turning it into
/// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
/// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
/// Returns the simplified value, or null if no simplification was performed.
static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
                          Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Check whether the expression has the form "(A op' B) op C".
  if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
    if (Op0->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op C) op' (B op C)".
      Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
      // Do "A op C" and "B op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
          if ((L == A && R == B) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == B && R == A)) {
            ++NumExpand;
            return LHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  // Check whether the expression has the form "A op (B op' C)".
  if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
    if (Op1->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op B) op' (A op C)".
      Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
      // Do "A op B" and "A op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
          if ((L == B && R == C) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == C && R == B)) {
            ++NumExpand;
            return RHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  return nullptr;
}
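// Illustrative expansion (with op = or, op' = and): the LHS pattern turns
//   (A & B) | C   into   (A | C) & (B | C).
// For C == B & A, "A | (B & A)" folds to A and "B | (B & A)" folds to B,
// so "(A & B) | (B & A)" is recognised as just "A & B" (the original LHS).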
/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B) return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B) return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A) return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C) return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}
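// For example, with op = and:
//   %t = and i32 %x, %y
//   %r = and i32 %t, %y     ; "(A op B) op C" with C == B
// "B op C" folds to %y, which equals B, so the whole expression simplifies
// back to %t without creating any new instruction.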
/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && isa<UndefValue>(TV))
    return FV;
  if (FV && isa<UndefValue>(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == Opcode) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}
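// Concretely, for the example mentioned above:
//   %xz = and i32 %x, %z
//   %s  = select i1 %c, i32 %x, i32 %xz
//   %r  = and i32 %s, %z
// the false branch ("and %xz, %z") folds to %xz while the true branch
// ("and %x, %z") does not simplify, but it matches %xz operand-for-operand,
// so %r becomes %xz.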
/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse);
  if (TCmp == Cond) {
    // It not only simplified, it simplified to the select condition. Replace
    // it with 'true'.
    TCmp = getTrue(Cond->getType());
  } else if (!TCmp) {
    // It didn't simplify. However if "cmp TV, RHS" is equal to the select
    // condition then we can replace it with 'true'. Otherwise give up.
    if (!isSameCompare(Cond, Pred, TV, RHS))
      return nullptr;
    TCmp = getTrue(Cond->getType());
  }

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse);
  if (FCmp == Cond) {
    // It not only simplified, it simplified to the select condition. Replace
    // it with 'false'.
    FCmp = getFalse(Cond->getType());
  } else if (!FCmp) {
    // It didn't simplify. However if "cmp FV, RHS" is equal to the select
    // condition then we can replace it with 'false'. Otherwise give up.
    if (!isSameCompare(Cond, Pred, FV, RHS))
      return nullptr;
    FCmp = getFalse(Cond->getType());
  }

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
    return nullptr;
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  if (match(FCmp, m_Zero()))
    if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()))
    if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V =
            SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()),
                            Q, MaxRecurse))
      return V;

  return nullptr;
}
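// For example:
//   %s = select i1 %c, i32 0, i32 1
//   %r = icmp eq i32 %s, 0
// The true branch folds to true and the false branch to false, so the
// comparison reduces to "%c && true", i.e. simply %c.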
/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!ValueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!ValueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = PI == LHS ?
      SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
      SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!ValueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the comparison on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
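// For example:
//   %p = phi i32 [ 1, %bb0 ], [ 2, %bb1 ]
//   %r = icmp ult i32 %p, 4
// Comparing each incoming value gives true both times, so %r folds to true.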
static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // E.g., X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1   since   ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((isNSW || isNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Query) {
  return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query, RecursionLimit);
}
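// For example, the signmask fold above turns
//   %x = xor i32 %y, -2147483648
//   %a = add nuw i32 %x, -2147483648
// directly into %y.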
/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. V
/// must be a pointer (or vector of pointers); the constant '0' is returned if
/// there are no constant offsets applied.
///
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                bool AllowNonInbounds = false) {
  assert(V->getType()->getScalarType()->isPointerTy());

  Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
  APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if ((!AllowNonInbounds && !GEP->isInBounds()) ||
          !GEP->accumulateConstantOffset(DL, Offset))
        break;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        break;
      V = GA->getAliasee();
    } else {
      if (auto CS = CallSite(V))
        if (Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }
      break;
    }
    assert(V->getType()->getScalarType()->isPointerTy() &&
           "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
  if (V->getType()->isVectorTy())
    return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
                                    OffsetIntPtr);
  return OffsetIntPtr;
}

/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  return ConstantExpr::getSub(LHSOffset, RHSOffset);
}
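// For example, assuming an i8* %base:
//   %p = getelementptr inbounds i8, i8* %base, i64 8
//   %q = getelementptr inbounds i8, i8* %base, i64 3
// both strip down to %base with offsets 8 and 3, so
// computePointerDifference(%p, %q) yields the constant 5.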
/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - undef -> undef
  // undef - X -> undef
  if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (isNUW)
      return Op0;

    KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (isNSW)
        return Op0;

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) &&
      match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}
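// For example, the "Z - (X - Y)" rule above handles
//   %d = sub i32 %x, %y
//   %r = sub i32 %x, %d
// "Z - X" is "%x - %x", which folds to 0, and "0 + %y" folds to %y.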
/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
    return C;

  // fadd X, -0 ==> X
  if (match(Op1, m_NegZero()))
    return Op0;

  // fadd X, 0 ==> X, when we know X is not -0
  if (match(Op1, m_Zero()) &&
      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
    return Op0;

  // fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
  //   where nnan and ninf have to occur at least once somewhere in this
  //   expression
  Value *SubOp = nullptr;
  if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
    SubOp = Op1;
  else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
    SubOp = Op0;
  if (SubOp) {
    Instruction *FSub = cast<Instruction>(SubOp);
    if ((FMF.noNaNs() || FSub->hasNoNaNs()) &&
        (FMF.noInfs() || FSub->hasNoInfs()))
      return Constant::getNullValue(Op0->getType());
  }

  return nullptr;
}

/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
    return C;

  // fsub X, 0 ==> X
  if (match(Op1, m_Zero()))
    return Op0;

  // fsub X, -0 ==> X, when we know X is not -0
  if (match(Op1, m_NegZero()) &&
      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
    return Op0;

  // fsub -0.0, (fsub -0.0, X) ==> X
  Value *X;
  if (match(Op0, m_NegZero()) && match(Op1, m_FSub(m_NegZero(), m_Value(X))))
    return X;

  // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
  if (FMF.noSignedZeros() && match(Op0, m_AnyZero()) &&
      match(Op1, m_FSub(m_AnyZero(), m_Value(X))))
    return X;

  // fsub nnan x, x ==> 0.0
  if (FMF.noNaNs() && Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  return nullptr;
}

/// Given the operands for an FMul, see if we can fold the result.
static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
    return C;

  // fmul X, 1.0 ==> X
  if (match(Op1, m_FPOne()))
    return Op0;

  // fmul nnan nsz X, 0 ==> 0
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
    return Op1;

  return nullptr;
}
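// Note why "fadd X, 0" above needs no-signed-zeros information: if X is
// -0.0, then under the default rounding mode -0.0 + 0.0 evaluates to +0.0,
// so the fold would change the sign of the result.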
/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * undef -> 0
  if (match(Op1, m_Undef()))
    return Constant::getNullValue(Op0->getType());

  // X * 0 -> 0
  if (match(Op1, m_Zero()))
    return Op1;

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
      match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))   // Y * (X / Y)
    return X;

  // i1 mul -> and.
  if (MaxRecurse && Op0->getType()->getScalarType()->isIntegerTy(1))
    if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
                             Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
                                         MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
                                      MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q) {
  return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit);
}

Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q) {
  return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit);
}

Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q) {
  return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit);
}

Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
}
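// For example, the exact-division fold above turns
//   %q = sdiv exact i32 %x, %y
//   %r = mul i32 %q, %y
// into %x, since "exact" guarantees the division had no remainder.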
/// Check for common or similar folds of integer division or integer remainder.
static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
  Type *Ty = Op0->getType();

  // X / undef -> undef
  // X % undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X / 0 -> undef
  // X % 0 -> undef
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return UndefValue::get(Ty);

  // If any element of a constant divisor vector is zero, the whole op is undef.
  auto *Op1C = dyn_cast<Constant>(Op1);
  if (Op1C && Ty->isVectorTy()) {
    unsigned NumElts = Ty->getVectorNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = Op1C->getAggregateElement(i);
      if (Elt && Elt->isNullValue())
        return UndefValue::get(Ty);
    }
  }

  // undef / X -> 0
  // undef % X -> 0
  if (match(Op0, m_Undef()))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Op0;

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If this is a boolean op (single-bit element type), we can't have
  // division-by-zero or remainder-by-zero, so assume the divisor is 1.
  if (match(Op1, m_One()) || Ty->getScalarType()->isIntegerTy(1))
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  return nullptr;
}

/// Given operands for an SDiv or UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, true))
    return V;

  bool isSigned = Opcode == Instruction::SDiv;

  // (X * Y) / Y -> X if the multiplication does not overflow.
  Value *X = nullptr, *Y = nullptr;
  if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
    if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
    OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
    // If the Mul knows it does not overflow, then we are good to go.
    if ((isSigned && Mul->hasNoSignedWrap()) ||
        (!isSigned && Mul->hasNoUnsignedWrap()))
      return X;
    // If X has the form X = A / Y then X * Y cannot overflow.
    if (BinaryOperator *Div = dyn_cast<BinaryOperator>(X))
      if (Div->getOpcode() == Opcode && Div->getOperand(1) == Y)
        return X;
  }

  // (X rem Y) / Y -> 0
  if ((isSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (!isSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Constant::getNullValue(Op0->getType());

  // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
  ConstantInt *C1, *C2;
  if (!isSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
      match(Op1, m_ConstantInt(C2))) {
    bool Overflow;
    (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
    if (Overflow)
      return Constant::getNullValue(Op0->getType());
  }

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}
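// For example, the no-overflow multiplication fold above turns
//   %m = mul nuw i32 %x, %y
//   %q = udiv i32 %m, %y
// into %x: since the multiplication cannot wrap, dividing by %y undoes it.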
/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
    return V;

  // udiv %V, C -> 0 if %V < C
  if (MaxRecurse) {
    if (Constant *C = dyn_cast_or_null<Constant>(SimplifyICmpInst(
            ICmpInst::ICMP_ULT, Op0, Op1, Q, MaxRecurse - 1))) {
      if (C->isAllOnesValue()) {
        return Constant::getNullValue(Op0->getType());
      }
    }
  }

  return nullptr;
}

Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
}

static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const SimplifyQuery &Q, unsigned) {
  if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
    return C;

  // undef / X -> undef    (the undef could be a snan).
  if (match(Op0, m_Undef()))
    return Op0;

  // X / undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X / 1.0 -> X
  if (match(Op1, m_FPOne()))
    return Op0;

  // 0 / X -> 0
  // Requires that NaNs are off (X could be zero) and signed zeroes are
  // ignored (X could be positive or negative, so the output sign is unknown).
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
    return Op0;

  if (FMF.noNaNs()) {
    // X / X -> 1.0 is legal when NaNs are ignored.
    if (Op0 == Op1)
      return ConstantFP::get(Op0->getType(), 1.0);

    // -X /  X -> -1.0 and
    //  X / -X -> -1.0 are legal when NaNs are ignored.
    // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
    if ((BinaryOperator::isFNeg(Op0, /*IgnoreZeroSign=*/true) &&
         BinaryOperator::getFNegArgument(Op0) == Op1) ||
        (BinaryOperator::isFNeg(Op1, /*IgnoreZeroSign=*/true) &&
         BinaryOperator::getFNegArgument(Op1) == Op0))
      return ConstantFP::get(Op0->getType(), -1.0);
  }

  return nullptr;
}

Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q) {
  return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit);
}
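// For example (assuming known bits can prove the range, as they can here):
//   %v = and i32 %x, 7
//   %q = udiv i32 %v, 8
// "%v ult 8" folds to true, so the division folds to 0.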
/// Given operands for an SRem or URem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, false))
    return V;

  // (X % Y) % Y -> X % Y
  if ((Opcode == Instruction::SRem &&
       match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (Opcode == Instruction::URem &&
       match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Op0;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
    return V;

  // urem %V, C -> %V if %V < C
  if (MaxRecurse) {
    if (Constant *C = dyn_cast_or_null<Constant>(SimplifyICmpInst(
            ICmpInst::ICMP_ULT, Op0, Op1, Q, MaxRecurse - 1))) {
      if (C->isAllOnesValue()) {
        return Op0;
      }
    }
  }

  return nullptr;
}

Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                               const SimplifyQuery &Q, unsigned) {
  if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
    return C;

  // undef % X -> undef    (the undef could be a snan).
  if (match(Op0, m_Undef()))
    return Op0;

  // X % undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // 0 % X -> 0
  // Requires that NaNs are off (X could be zero) and signed zeroes are
  // ignored (X could be positive or negative, so the output sign is unknown).
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
    return Op0;

  return nullptr;
}

Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q) {
  return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit);
}
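// The remainder analogue of the udiv fold above: for
//   %v = and i32 %x, 7
//   %r = urem i32 %v, 8
// "%v ult 8" folds to true, so the remainder is just %v itself.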
/// Returns true if a shift by \c Amount always yields undef.
static bool isUndefShift(Value *Amount) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> undef because it may shift by the bitwidth.
  if (isa<UndefValue>(C))
    return true;

  // Shifting by the bitwidth or more is undefined.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
    if (CI->getValue().getLimitedValue() >=
        CI->getType()->getScalarSizeInBits())
      return true;

  // If all lanes of a vector shift are undefined the whole shift is.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
      if (!isUndefShift(C->getAggregateElement(I)))
        return false;
    return true;
  }

  return false;
}

/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Op0;

  // X shift by 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // Fold undefined shifts.
  if (isUndefShift(Op1))
    return UndefValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (Known.One.getLimitedValue() >= Known.getBitWidth())
    return UndefValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
  if (Known.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  return nullptr;
}
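// For example, the known-bits check above proves that in
//   %amt = or i32 %a, 40
//   %r   = lshr i32 %x, %amt
// the shift amount is always at least 40 >= 32, so %r folds to undef.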
/// \brief Given operands for an LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool isExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (match(Op0, m_Undef()))
    return isExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  if (isExact) {
    KnownBits Op0Known =
        computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
    return V;

  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (match(Op0, m_Undef()))
    return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());

  // (X >> A) << A -> X
  Value *X;
  if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;
  return nullptr;
}

Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}

/// Given operands for an LShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // (X << A) >> A -> X
  Value *X;
  if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
    return X;

  return nullptr;
}

Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                              const SimplifyQuery &Q) {
  return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}
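// For example:
//   %s = shl nuw i32 %x, %a
//   %r = lshr i32 %s, %a
// folds to %x: nuw guarantees the shl shifted out only zero bits.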
/// Given operands for an AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // all ones >>a X -> all ones
  if (match(Op0, m_AllOnes()))
    return Op0;

  // (X << A) >> A -> X
  Value *X;
  if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return Op0;

  return nullptr;
}

Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                              const SimplifyQuery &Q) {
  return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}

static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd) {
  Value *X, *Y;

  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  ICmpInst::Predicate UnsignedPred;
  if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
      ICmpInst::isUnsigned(UnsignedPred))
    ;
  else if (match(UnsignedICmp,
                 m_ICmp(UnsignedPred, m_Value(Y), m_Specific(X))) &&
           ICmpInst::isUnsigned(UnsignedPred))
    UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
  else
    return nullptr;

  // X < Y && Y != 0  -->  X < Y
  // X < Y || Y != 0  -->  Y != 0
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
    return IsAnd ? UnsignedICmp : ZeroICmp;

  // X >= Y || Y != 0  -->  true
  // X >= Y || Y == 0  -->  X >= Y
  if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) {
    if (EqPred == ICmpInst::ICMP_NE)
      return getTrue(UnsignedICmp->getType());
    return UnsignedICmp;
  }

  // X < Y && Y == 0  -->  false
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
      IsAnd)
    return getFalse(UnsignedICmp->getType());

  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
  ICmpInst::Predicate Pred0, Pred1;
  Value *A, *B;
  if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
      !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
    return nullptr;

  // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
  // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
  // can eliminate Op1 from this 'and'.
  if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
    return Op0;

  // Check for any combination of predicates that are guaranteed to be disjoint.
  if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
      (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
      (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
      (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
    return getFalse(Op0->getType());

  return nullptr;
}
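// For example, with identical operands:
//   (icmp ult %a, %b) & (icmp ule %a, %b)  -->  icmp ult %a, %b
// since ult implies ule, and
//   (icmp slt %a, %b) & (icmp sgt %a, %b)  -->  false
// since the two predicates are disjoint.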
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
  ICmpInst::Predicate Pred0, Pred1;
  Value *A, *B;
  if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
      !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
    return nullptr;

  // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
  // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
  // can eliminate Op0 from this 'or'.
  if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
    return Op1;

  // Check for any combination of predicates that cover the entire range of
  // possibilities.
  if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
      (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
      (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
      (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
    return getTrue(Op0->getType());

  return nullptr;
}

/// Test if a pair of compares with a shared operand and 2 constants has an
/// empty set intersection, full set union, or if one compare is a superset of
/// the other.
static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                                bool IsAnd) {
  // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
  if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
    return nullptr;

  const APInt *C0, *C1;
  if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
      !match(Cmp1->getOperand(1), m_APInt(C1)))
    return nullptr;

  auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
  auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);

  // For and-of-compares, check if the intersection is empty:
  // (icmp X, C0) && (icmp X, C1) --> empty set --> false
  if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
    return getFalse(Cmp0->getType());

  // For or-of-compares, check if the union is full:
  // (icmp X, C0) || (icmp X, C1) --> full set --> true
  if (!IsAnd && Range0.unionWith(Range1).isFullSet())
    return getTrue(Cmp0->getType());

  // Is one range a superset of the other?
  // If this is and-of-compares, take the smaller set:
  // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
  // If this is or-of-compares, take the larger set:
  // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
  if (Range0.contains(Range1))
    return IsAnd ? Cmp1 : Cmp0;
  if (Range1.contains(Range0))
    return IsAnd ? Cmp0 : Cmp1;

  return nullptr;
}
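// For example:
//   (icmp slt %x, 5) & (icmp sgt %x, 10)  -->  false
// makeExactICmpRegion gives [INT_MIN, 5) and [11, INT_MAX], whose
// intersection is empty.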
1559 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) { 1560 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true)) 1561 return X; 1562 1563 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1)) 1564 return X; 1565 1566 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true)) 1567 return X; 1568 1569 // (icmp (add V, C0), C1) & (icmp V, C0) 1570 Type *ITy = Op0->getType(); 1571 ICmpInst::Predicate Pred0, Pred1; 1572 const APInt *C0, *C1; 1573 Value *V; 1574 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1575 return nullptr; 1576 1577 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1578 return nullptr; 1579 1580 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); 1581 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1582 return nullptr; 1583 1584 bool isNSW = AddInst->hasNoSignedWrap(); 1585 bool isNUW = AddInst->hasNoUnsignedWrap(); 1586 1587 const APInt Delta = *C1 - *C0; 1588 if (C0->isStrictlyPositive()) { 1589 if (Delta == 2) { 1590 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT) 1591 return getFalse(ITy); 1592 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1593 return getFalse(ITy); 1594 } 1595 if (Delta == 1) { 1596 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT) 1597 return getFalse(ITy); 1598 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1599 return getFalse(ITy); 1600 } 1601 } 1602 if (C0->getBoolValue() && isNUW) { 1603 if (Delta == 2) 1604 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT) 1605 return getFalse(ITy); 1606 if (Delta == 1) 1607 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT) 1608 return getFalse(ITy); 1609 } 1610 1611 return nullptr; 1612 } 1613 1614 /// Commuted variants are assumed to be handled by calling this function again 1615 /// with the parameters swapped. 
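/// For example, (icmp ule X, 4) | (icmp uge X, 4) simplifies to true here:
/// the regions [0, 5) and [4, UINT_MAX] together cover every value of X.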
1616 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) { 1617 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false)) 1618 return X; 1619 1620 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1)) 1621 return X; 1622 1623 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false)) 1624 return X; 1625 1626 // (icmp (add V, C0), C1) | (icmp V, C0) 1627 ICmpInst::Predicate Pred0, Pred1; 1628 const APInt *C0, *C1; 1629 Value *V; 1630 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1631 return nullptr; 1632 1633 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1634 return nullptr; 1635 1636 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); 1637 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1638 return nullptr; 1639 1640 Type *ITy = Op0->getType(); 1641 bool isNSW = AddInst->hasNoSignedWrap(); 1642 bool isNUW = AddInst->hasNoUnsignedWrap(); 1643 1644 const APInt Delta = *C1 - *C0; 1645 if (C0->isStrictlyPositive()) { 1646 if (Delta == 2) { 1647 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE) 1648 return getTrue(ITy); 1649 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1650 return getTrue(ITy); 1651 } 1652 if (Delta == 1) { 1653 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE) 1654 return getTrue(ITy); 1655 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1656 return getTrue(ITy); 1657 } 1658 } 1659 if (C0->getBoolValue() && isNUW) { 1660 if (Delta == 2) 1661 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE) 1662 return getTrue(ITy); 1663 if (Delta == 1) 1664 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE) 1665 return getTrue(ITy); 1666 } 1667 1668 return nullptr; 1669 } 1670 1671 static Value *simplifyPossiblyCastedAndOrOfICmps(ICmpInst *Cmp0, ICmpInst *Cmp1, 1672 bool IsAnd, CastInst *Cast) { 1673 Value *V = 1674 IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1); 1675 if (!V) 1676 return nullptr; 1677 if (!Cast) 1678 return V; 1679 1680 // If we looked through casts, we can only handle a constant simplification 1681 // because we are not allowed to create a cast instruction here. 1682 if (auto *C = dyn_cast<Constant>(V)) 1683 return ConstantExpr::getCast(Cast->getOpcode(), C, Cast->getType()); 1684 1685 return nullptr; 1686 } 1687 1688 static Value *simplifyAndOrOfICmps(Value *Op0, Value *Op1, bool IsAnd) { 1689 // Look through casts of the 'and' operands to find compares. 1690 auto *Cast0 = dyn_cast<CastInst>(Op0); 1691 auto *Cast1 = dyn_cast<CastInst>(Op1); 1692 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() && 1693 Cast0->getSrcTy() == Cast1->getSrcTy()) { 1694 Op0 = Cast0->getOperand(0); 1695 Op1 = Cast1->getOperand(0); 1696 } 1697 1698 auto *Cmp0 = dyn_cast<ICmpInst>(Op0); 1699 auto *Cmp1 = dyn_cast<ICmpInst>(Op1); 1700 if (!Cmp0 || !Cmp1) 1701 return nullptr; 1702 1703 if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp0, Cmp1, IsAnd, Cast0)) 1704 return V; 1705 if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp1, Cmp0, IsAnd, Cast0)) 1706 return V; 1707 1708 return nullptr; 1709 } 1710 1711 /// Given operands for an And, see if we can fold the result. 1712 /// If not, this returns null. 
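/// Typical folds handled below include "and i32 %x, -1" -> %x and
/// "and i32 %x, (xor i32 %x, -1)" -> 0, since a value and its complement
/// share no set bits.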
1713 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1714 unsigned MaxRecurse) { 1715 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q)) 1716 return C; 1717 1718 // X & undef -> 0 1719 if (match(Op1, m_Undef())) 1720 return Constant::getNullValue(Op0->getType()); 1721 1722 // X & X = X 1723 if (Op0 == Op1) 1724 return Op0; 1725 1726 // X & 0 = 0 1727 if (match(Op1, m_Zero())) 1728 return Op1; 1729 1730 // X & -1 = X 1731 if (match(Op1, m_AllOnes())) 1732 return Op0; 1733 1734 // A & ~A = ~A & A = 0 1735 if (match(Op0, m_Not(m_Specific(Op1))) || 1736 match(Op1, m_Not(m_Specific(Op0)))) 1737 return Constant::getNullValue(Op0->getType()); 1738 1739 // (A | ?) & A = A 1740 Value *A = nullptr, *B = nullptr; 1741 if (match(Op0, m_Or(m_Value(A), m_Value(B))) && 1742 (A == Op1 || B == Op1)) 1743 return Op1; 1744 1745 // A & (A | ?) = A 1746 if (match(Op1, m_Or(m_Value(A), m_Value(B))) && 1747 (A == Op0 || B == Op0)) 1748 return Op0; 1749 1750 // A mask that only clears known zeros of a shifted value is a no-op. 1751 Value *X; 1752 const APInt *Mask; 1753 const APInt *ShAmt; 1754 if (match(Op1, m_APInt(Mask))) { 1755 // If all bits in the inverted and shifted mask are clear: 1756 // and (shl X, ShAmt), Mask --> shl X, ShAmt 1757 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) && 1758 (~(*Mask)).lshr(*ShAmt).isNullValue()) 1759 return Op0; 1760 1761 // If all bits in the inverted and shifted mask are clear: 1762 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt 1763 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) && 1764 (~(*Mask)).shl(*ShAmt).isNullValue()) 1765 return Op0; 1766 } 1767 1768 // A & (-A) = A if A is a power of two or zero. 1769 if (match(Op0, m_Neg(m_Specific(Op1))) || 1770 match(Op1, m_Neg(m_Specific(Op0)))) { 1771 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 1772 Q.DT)) 1773 return Op0; 1774 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 1775 Q.DT)) 1776 return Op1; 1777 } 1778 1779 if (Value *V = simplifyAndOrOfICmps(Op0, Op1, true)) 1780 return V; 1781 1782 // Try some generic simplifications for associative operations. 1783 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, 1784 MaxRecurse)) 1785 return V; 1786 1787 // And distributes over Or. Try some generic simplifications based on this. 1788 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or, 1789 Q, MaxRecurse)) 1790 return V; 1791 1792 // And distributes over Xor. Try some generic simplifications based on this. 1793 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor, 1794 Q, MaxRecurse)) 1795 return V; 1796 1797 // If the operation is with the result of a select instruction, check whether 1798 // operating on either branch of the select always yields the same value. 1799 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1800 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q, 1801 MaxRecurse)) 1802 return V; 1803 1804 // If the operation is with the result of a phi instruction, check whether 1805 // operating on all incoming values of the phi always yields the same value. 
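  // e.g. with %p = phi i32 [ %x, BB0 ], [ -1, BB1 ], "and i32 %p, %x" can
  // simplify to %x: both "and %x, %x" and "and -1, %x" evaluate to %x, so
  // the result is the same along every incoming edge.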
1806 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1807 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q, 1808 MaxRecurse)) 1809 return V; 1810 1811 return nullptr; 1812 } 1813 1814 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1815 return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit); 1816 } 1817 1818 /// Given operands for an Or, see if we can fold the result. 1819 /// If not, this returns null. 1820 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1821 unsigned MaxRecurse) { 1822 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q)) 1823 return C; 1824 1825 // X | undef -> -1 1826 if (match(Op1, m_Undef())) 1827 return Constant::getAllOnesValue(Op0->getType()); 1828 1829 // X | X = X 1830 if (Op0 == Op1) 1831 return Op0; 1832 1833 // X | 0 = X 1834 if (match(Op1, m_Zero())) 1835 return Op0; 1836 1837 // X | -1 = -1 1838 if (match(Op1, m_AllOnes())) 1839 return Op1; 1840 1841 // A | ~A = ~A | A = -1 1842 if (match(Op0, m_Not(m_Specific(Op1))) || 1843 match(Op1, m_Not(m_Specific(Op0)))) 1844 return Constant::getAllOnesValue(Op0->getType()); 1845 1846 // (A & ?) | A = A 1847 Value *A = nullptr, *B = nullptr; 1848 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 1849 (A == Op1 || B == Op1)) 1850 return Op1; 1851 1852 // A | (A & ?) = A 1853 if (match(Op1, m_And(m_Value(A), m_Value(B))) && 1854 (A == Op0 || B == Op0)) 1855 return Op0; 1856 1857 // ~(A & ?) | A = -1 1858 if (match(Op0, m_Not(m_And(m_Value(A), m_Value(B)))) && 1859 (A == Op1 || B == Op1)) 1860 return Constant::getAllOnesValue(Op1->getType()); 1861 1862 // A | ~(A & ?) = -1 1863 if (match(Op1, m_Not(m_And(m_Value(A), m_Value(B)))) && 1864 (A == Op0 || B == Op0)) 1865 return Constant::getAllOnesValue(Op0->getType()); 1866 1867 // (A & ~B) | (A ^ B) -> (A ^ B) 1868 // (~B & A) | (A ^ B) -> (A ^ B) 1869 // (A & ~B) | (B ^ A) -> (B ^ A) 1870 // (~B & A) | (B ^ A) -> (B ^ A) 1871 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && 1872 (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) || 1873 match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))) 1874 return Op1; 1875 1876 // Commute the 'or' operands. 1877 // (A ^ B) | (A & ~B) -> (A ^ B) 1878 // (A ^ B) | (~B & A) -> (A ^ B) 1879 // (B ^ A) | (A & ~B) -> (B ^ A) 1880 // (B ^ A) | (~B & A) -> (B ^ A) 1881 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && 1882 (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) || 1883 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))) 1884 return Op0; 1885 1886 // (A & B) | (~A ^ B) -> (~A ^ B) 1887 // (B & A) | (~A ^ B) -> (~A ^ B) 1888 // (A & B) | (B ^ ~A) -> (B ^ ~A) 1889 // (B & A) | (B ^ ~A) -> (B ^ ~A) 1890 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 1891 (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) || 1892 match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B))))) 1893 return Op1; 1894 1895 // (~A ^ B) | (A & B) -> (~A ^ B) 1896 // (~A ^ B) | (B & A) -> (~A ^ B) 1897 // (B ^ ~A) | (A & B) -> (B ^ ~A) 1898 // (B ^ ~A) | (B & A) -> (B ^ ~A) 1899 if (match(Op1, m_And(m_Value(A), m_Value(B))) && 1900 (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) || 1901 match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B))))) 1902 return Op0; 1903 1904 if (Value *V = simplifyAndOrOfICmps(Op0, Op1, false)) 1905 return V; 1906 1907 // Try some generic simplifications for associative operations. 
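  // e.g. "or (or %x, %y), %y" -> "or %x, %y": reassociating exposes the
  // inner "or %y, %y", which is just %y.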
1908 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, 1909 MaxRecurse)) 1910 return V; 1911 1912 // Or distributes over And. Try some generic simplifications based on this. 1913 if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q, 1914 MaxRecurse)) 1915 return V; 1916 1917 // If the operation is with the result of a select instruction, check whether 1918 // operating on either branch of the select always yields the same value. 1919 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1920 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, 1921 MaxRecurse)) 1922 return V; 1923 1924 // (A & C)|(B & D) 1925 Value *C = nullptr, *D = nullptr; 1926 if (match(Op0, m_And(m_Value(A), m_Value(C))) && 1927 match(Op1, m_And(m_Value(B), m_Value(D)))) { 1928 ConstantInt *C1 = dyn_cast<ConstantInt>(C); 1929 ConstantInt *C2 = dyn_cast<ConstantInt>(D); 1930 if (C1 && C2 && (C1->getValue() == ~C2->getValue())) { 1931 // (A & C1)|(B & C2) 1932 // If we have: ((V + N) & C1) | (V & C2) 1933 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0 1934 // replace with V+N. 1935 Value *V1, *V2; 1936 if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+ 1937 match(A, m_Add(m_Value(V1), m_Value(V2)))) { 1938 // Add commutes, try both ways. 1939 if (V1 == B && 1940 MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 1941 return A; 1942 if (V2 == B && 1943 MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 1944 return A; 1945 } 1946 // Or commutes, try both ways. 1947 if ((C1->getValue() & (C1->getValue() + 1)) == 0 && 1948 match(B, m_Add(m_Value(V1), m_Value(V2)))) { 1949 // Add commutes, try both ways. 1950 if (V1 == A && 1951 MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 1952 return B; 1953 if (V2 == A && 1954 MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 1955 return B; 1956 } 1957 } 1958 } 1959 1960 // If the operation is with the result of a phi instruction, check whether 1961 // operating on all incoming values of the phi always yields the same value. 1962 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1963 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse)) 1964 return V; 1965 1966 return nullptr; 1967 } 1968 1969 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1970 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit); 1971 } 1972 1973 /// Given operands for a Xor, see if we can fold the result. 1974 /// If not, this returns null. 1975 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1976 unsigned MaxRecurse) { 1977 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q)) 1978 return C; 1979 1980 // A ^ undef -> undef 1981 if (match(Op1, m_Undef())) 1982 return Op1; 1983 1984 // A ^ 0 = A 1985 if (match(Op1, m_Zero())) 1986 return Op0; 1987 1988 // A ^ A = 0 1989 if (Op0 == Op1) 1990 return Constant::getNullValue(Op0->getType()); 1991 1992 // A ^ ~A = ~A ^ A = -1 1993 if (match(Op0, m_Not(m_Specific(Op1))) || 1994 match(Op1, m_Not(m_Specific(Op0)))) 1995 return Constant::getAllOnesValue(Op0->getType()); 1996 1997 // Try some generic simplifications for associative operations. 1998 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, 1999 MaxRecurse)) 2000 return V; 2001 2002 // Threading Xor over selects and phi nodes is pointless, so don't bother. 
2003   // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2004   // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2005   // only if B and C are equal. If B and C are equal then (since we assume
2006   // that operands have already been simplified) "select(cond, B, C)" should
2007   // have been simplified to the common value of B and C already. Analysing
2008   // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2009   // for threading over phi nodes.
2010
2011   return nullptr;
2012 }
2013
2014 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2015   return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
2016 }
2017
2018
2019 static Type *GetCompareTy(Value *Op) {
2020   return CmpInst::makeCmpResultType(Op->getType());
2021 }
2022
2023 /// Rummage around inside V looking for something equivalent to the comparison
2024 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2025 /// Helper function for analyzing max/min idioms.
2026 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2027                                          Value *LHS, Value *RHS) {
2028   SelectInst *SI = dyn_cast<SelectInst>(V);
2029   if (!SI)
2030     return nullptr;
2031   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2032   if (!Cmp)
2033     return nullptr;
2034   Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2035   if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2036     return Cmp;
2037   if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2038       LHS == CmpRHS && RHS == CmpLHS)
2039     return Cmp;
2040   return nullptr;
2041 }
2042
2043 // A significant optimization not implemented here is assuming that alloca
2044 // addresses are not equal to incoming argument values. They don't *alias*,
2045 // as we say, but that doesn't mean they aren't equal, so we take a
2046 // conservative approach.
2047 //
2048 // This is inspired in part by C++11 5.10p1:
2049 //   "Two pointers of the same type compare equal if and only if they are both
2050 //   null, both point to the same function, or both represent the same
2051 //   address."
2052 //
2053 // This is pretty permissive.
2054 //
2055 // It's also partly due to C11 6.5.9p6:
2056 //   "Two pointers compare equal if and only if both are null pointers, both are
2057 //   pointers to the same object (including a pointer to an object and a
2058 //   subobject at its beginning) or function, both are pointers to one past the
2059 //   last element of the same array object, or one is a pointer to one past the
2060 //   end of one array object and the other is a pointer to the start of a
2061 //   different array object that happens to immediately follow the first array
2062 //   object in the address space."
2063 //
2064 // C11's version is more restrictive, however there's no reason why an argument
2065 // couldn't be a one-past-the-end value for a stack object in the caller and be
2066 // equal to the beginning of a stack object in the callee.
2067 //
2068 // If the C and C++ standards are ever made sufficiently restrictive in this
2069 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2070 // this optimization.
2071 static Constant *
2072 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2073                    const DominatorTree *DT, CmpInst::Predicate Pred,
2074                    const Instruction *CxtI, Value *LHS, Value *RHS) {
2075   // First, skip past any trivial no-ops.
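  // ("Trivial no-ops" here means pointer bitcasts, GEPs with all-zero
  // indices, and similar address-preserving wrappers that
  // stripPointerCasts peels off.)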
2076   LHS = LHS->stripPointerCasts();
2077   RHS = RHS->stripPointerCasts();
2078
2079   // A non-null pointer is not equal to a null pointer.
2080   if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
2081       (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
2082     return ConstantInt::get(GetCompareTy(LHS),
2083                             !CmpInst::isTrueWhenEqual(Pred));
2084
2085   // We can only fold certain predicates on pointer comparisons.
2086   switch (Pred) {
2087   default:
2088     return nullptr;
2089
2090   // Equality comparisons are easy to fold.
2091   case CmpInst::ICMP_EQ:
2092   case CmpInst::ICMP_NE:
2093     break;
2094
2095   // We can only handle unsigned relational comparisons because 'inbounds' on
2096   // a GEP only protects against unsigned wrapping.
2097   case CmpInst::ICMP_UGT:
2098   case CmpInst::ICMP_UGE:
2099   case CmpInst::ICMP_ULT:
2100   case CmpInst::ICMP_ULE:
2101     // However, we have to switch them to their signed variants to handle
2102     // negative indices from the base pointer.
2103     Pred = ICmpInst::getSignedPredicate(Pred);
2104     break;
2105   }
2106
2107   // Strip off any constant offsets so that we can reason about them.
2108   // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2109   // here and compare base addresses like AliasAnalysis does, however there are
2110   // numerous hazards. AliasAnalysis and its utilities rely on special rules
2111   // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2112   // doesn't need to guarantee pointer inequality when it says NoAlias.
2113   Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2114   Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2115
2116   // If LHS and RHS are related via constant offsets to the same base
2117   // value, we can replace it with an icmp which just compares the offsets.
2118   if (LHS == RHS)
2119     return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2120
2121   // Various optimizations for (in)equality comparisons.
2122   if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2123     // Different non-empty allocations that exist at the same time have
2124     // different addresses (if the program can tell). Global variables always
2125     // exist, so they always exist during the lifetime of each other and all
2126     // allocas. Two different allocas usually have different addresses...
2127     //
2128     // However, if there's an @llvm.stackrestore dynamically in between two
2129     // allocas, they may have the same address. It's tempting to reduce the
2130     // scope of the problem by only looking at *static* allocas here. That would
2131     // cover the majority of allocas while significantly reducing the likelihood
2132     // of having an @llvm.stackrestore pop up in the middle. However, it's not
2133     // actually impossible for an @llvm.stackrestore to pop up in the middle of
2134     // an entry block. Also, if we have a block that's not attached to a
2135     // function, we can't tell if it's "static" under the current definition.
2136     // Theoretically, this problem could be fixed by creating a new kind of
2137     // instruction specifically for static allocas. Such a new instruction
2138     // could be required to be at the top of the entry block, thus preventing it
2139     // from being subject to a @llvm.stackrestore. Instcombine could even
2140     // convert regular allocas into these special allocas. It'd be nifty.
2141     // However, until then, this problem remains open.
2142     //
2143     // So, we'll assume that two non-empty allocas have different addresses
2144     // for now.
2145     //
2146     // With all that, if the offsets are within the bounds of their allocations
2147     // (and not one-past-the-end! so we can't use inbounds!), and their
2148     // allocations aren't the same, the pointers are not equal.
2149     //
2150     // Note that it's not necessary to check for LHS being a global variable
2151     // address, due to canonicalization and constant folding.
2152     if (isa<AllocaInst>(LHS) &&
2153         (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2154       ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2155       ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2156       uint64_t LHSSize, RHSSize;
2157       if (LHSOffsetCI && RHSOffsetCI &&
2158           getObjectSize(LHS, LHSSize, DL, TLI) &&
2159           getObjectSize(RHS, RHSSize, DL, TLI)) {
2160         const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2161         const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2162         if (!LHSOffsetValue.isNegative() &&
2163             !RHSOffsetValue.isNegative() &&
2164             LHSOffsetValue.ult(LHSSize) &&
2165             RHSOffsetValue.ult(RHSSize)) {
2166           return ConstantInt::get(GetCompareTy(LHS),
2167                                   !CmpInst::isTrueWhenEqual(Pred));
2168         }
2169       }
2170
2171       // Repeat the above check but this time without depending on DataLayout
2172       // or being able to compute a precise size.
2173       if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2174           !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2175           LHSOffset->isNullValue() &&
2176           RHSOffset->isNullValue())
2177         return ConstantInt::get(GetCompareTy(LHS),
2178                                 !CmpInst::isTrueWhenEqual(Pred));
2179     }
2180
2181     // Even if a non-inbounds GEP occurs along the path we can still optimize
2182     // equality comparisons concerning the result. We avoid walking the whole
2183     // chain again by starting where the last calls to
2184     // stripAndComputeConstantOffsets left off and accumulate the offsets.
2185     Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2186     Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2187     if (LHS == RHS)
2188       return ConstantExpr::getICmp(Pred,
2189                                    ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2190                                    ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2191
2192     // If one side of the equality comparison must come from a noalias call
2193     // (meaning a system memory allocation function), and the other side must
2194     // come from a pointer that cannot overlap with dynamically-allocated
2195     // memory within the lifetime of the current function (allocas, byval
2196     // arguments, globals), then determine the comparison result here.
2197     SmallVector<Value *, 8> LHSUObjs, RHSUObjs;
2198     GetUnderlyingObjects(LHS, LHSUObjs, DL);
2199     GetUnderlyingObjects(RHS, RHSUObjs, DL);
2200
2201     // Is the set of underlying objects all noalias calls?
2202     auto IsNAC = [](ArrayRef<Value *> Objects) {
2203       return all_of(Objects, isNoAliasCall);
2204     };
2205
2206     // Is the set of underlying objects all things which must be disjoint from
2207     // noalias calls? For allocas, we consider only static ones (dynamic
2208     // allocas might be transformed into calls to malloc not simultaneously
2209     // live with the compared-to allocation). For globals, we exclude symbols
2210     // that might be resolved lazily to symbols in another dynamically-loaded
2211     // library (and, thus, could be malloc'ed by the implementation).
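    // e.g. a pointer returned from malloc compared for equality against the
    // address of a static alloca or an internal global can be folded to
    // "not equal" by the checks below, even when the allocation itself
    // cannot be removed.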
2212 auto IsAllocDisjoint = [](ArrayRef<Value *> Objects) { 2213 return all_of(Objects, [](Value *V) { 2214 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) 2215 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca(); 2216 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) 2217 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() || 2218 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) && 2219 !GV->isThreadLocal(); 2220 if (const Argument *A = dyn_cast<Argument>(V)) 2221 return A->hasByValAttr(); 2222 return false; 2223 }); 2224 }; 2225 2226 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) || 2227 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs))) 2228 return ConstantInt::get(GetCompareTy(LHS), 2229 !CmpInst::isTrueWhenEqual(Pred)); 2230 2231 // Fold comparisons for non-escaping pointer even if the allocation call 2232 // cannot be elided. We cannot fold malloc comparison to null. Also, the 2233 // dynamic allocation call could be either of the operands. 2234 Value *MI = nullptr; 2235 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT)) 2236 MI = LHS; 2237 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT)) 2238 MI = RHS; 2239 // FIXME: We should also fold the compare when the pointer escapes, but the 2240 // compare dominates the pointer escape 2241 if (MI && !PointerMayBeCaptured(MI, true, true)) 2242 return ConstantInt::get(GetCompareTy(LHS), 2243 CmpInst::isFalseWhenEqual(Pred)); 2244 } 2245 2246 // Otherwise, fail. 2247 return nullptr; 2248 } 2249 2250 /// Fold an icmp when its operands have i1 scalar type. 2251 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS, 2252 Value *RHS, const SimplifyQuery &Q) { 2253 Type *ITy = GetCompareTy(LHS); // The return type. 2254 Type *OpTy = LHS->getType(); // The operand type. 2255 if (!OpTy->getScalarType()->isIntegerTy(1)) 2256 return nullptr; 2257 2258 // A boolean compared to true/false can be simplified in 14 out of the 20 2259 // (10 predicates * 2 constants) possible combinations. Cases not handled here 2260 // require a 'not' of the LHS, so those must be transformed in InstCombine. 2261 if (match(RHS, m_Zero())) { 2262 switch (Pred) { 2263 case CmpInst::ICMP_NE: // X != 0 -> X 2264 case CmpInst::ICMP_UGT: // X >u 0 -> X 2265 case CmpInst::ICMP_SLT: // X <s 0 -> X 2266 return LHS; 2267 2268 case CmpInst::ICMP_ULT: // X <u 0 -> false 2269 case CmpInst::ICMP_SGT: // X >s 0 -> false 2270 return getFalse(ITy); 2271 2272 case CmpInst::ICMP_UGE: // X >=u 0 -> true 2273 case CmpInst::ICMP_SLE: // X <=s 0 -> true 2274 return getTrue(ITy); 2275 2276 default: break; 2277 } 2278 } else if (match(RHS, m_One())) { 2279 switch (Pred) { 2280 case CmpInst::ICMP_EQ: // X == 1 -> X 2281 case CmpInst::ICMP_UGE: // X >=u 1 -> X 2282 case CmpInst::ICMP_SLE: // X <=s -1 -> X 2283 return LHS; 2284 2285 case CmpInst::ICMP_UGT: // X >u 1 -> false 2286 case CmpInst::ICMP_SLT: // X <s -1 -> false 2287 return getFalse(ITy); 2288 2289 case CmpInst::ICMP_ULE: // X <=u 1 -> true 2290 case CmpInst::ICMP_SGE: // X >=s -1 -> true 2291 return getTrue(ITy); 2292 2293 default: break; 2294 } 2295 } 2296 2297 switch (Pred) { 2298 default: 2299 break; 2300 case ICmpInst::ICMP_UGE: 2301 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false)) 2302 return getTrue(ITy); 2303 break; 2304 case ICmpInst::ICMP_SGE: 2305 /// For signed comparison, the values for an i1 are 0 and -1 2306 /// respectively. 
This maps into a truth table of: 2307 /// LHS | RHS | LHS >=s RHS | LHS implies RHS 2308 /// 0 | 0 | 1 (0 >= 0) | 1 2309 /// 0 | 1 | 1 (0 >= -1) | 1 2310 /// 1 | 0 | 0 (-1 >= 0) | 0 2311 /// 1 | 1 | 1 (-1 >= -1) | 1 2312 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2313 return getTrue(ITy); 2314 break; 2315 case ICmpInst::ICMP_ULE: 2316 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2317 return getTrue(ITy); 2318 break; 2319 } 2320 2321 return nullptr; 2322 } 2323 2324 /// Try hard to fold icmp with zero RHS because this is a common case. 2325 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, 2326 Value *RHS, const SimplifyQuery &Q) { 2327 if (!match(RHS, m_Zero())) 2328 return nullptr; 2329 2330 Type *ITy = GetCompareTy(LHS); // The return type. 2331 switch (Pred) { 2332 default: 2333 llvm_unreachable("Unknown ICmp predicate!"); 2334 case ICmpInst::ICMP_ULT: 2335 return getFalse(ITy); 2336 case ICmpInst::ICMP_UGE: 2337 return getTrue(ITy); 2338 case ICmpInst::ICMP_EQ: 2339 case ICmpInst::ICMP_ULE: 2340 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2341 return getFalse(ITy); 2342 break; 2343 case ICmpInst::ICMP_NE: 2344 case ICmpInst::ICMP_UGT: 2345 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2346 return getTrue(ITy); 2347 break; 2348 case ICmpInst::ICMP_SLT: { 2349 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2350 if (LHSKnown.isNegative()) 2351 return getTrue(ITy); 2352 if (LHSKnown.isNonNegative()) 2353 return getFalse(ITy); 2354 break; 2355 } 2356 case ICmpInst::ICMP_SLE: { 2357 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2358 if (LHSKnown.isNegative()) 2359 return getTrue(ITy); 2360 if (LHSKnown.isNonNegative() && 2361 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2362 return getFalse(ITy); 2363 break; 2364 } 2365 case ICmpInst::ICMP_SGE: { 2366 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2367 if (LHSKnown.isNegative()) 2368 return getFalse(ITy); 2369 if (LHSKnown.isNonNegative()) 2370 return getTrue(ITy); 2371 break; 2372 } 2373 case ICmpInst::ICMP_SGT: { 2374 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2375 if (LHSKnown.isNegative()) 2376 return getFalse(ITy); 2377 if (LHSKnown.isNonNegative() && 2378 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2379 return getTrue(ITy); 2380 break; 2381 } 2382 } 2383 2384 return nullptr; 2385 } 2386 2387 /// Many binary operators with a constant operand have an easy-to-compute 2388 /// range of outputs. This can be used to fold a comparison to always true or 2389 /// always false. 2390 static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper) { 2391 unsigned Width = Lower.getBitWidth(); 2392 const APInt *C; 2393 switch (BO.getOpcode()) { 2394 case Instruction::Add: 2395 if (match(BO.getOperand(1), m_APInt(C)) && *C != 0) { 2396 // FIXME: If we have both nuw and nsw, we should reduce the range further. 2397 if (BO.hasNoUnsignedWrap()) { 2398 // 'add nuw x, C' produces [C, UINT_MAX]. 2399 Lower = *C; 2400 } else if (BO.hasNoSignedWrap()) { 2401 if (C->isNegative()) { 2402 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C]. 2403 Lower = APInt::getSignedMinValue(Width); 2404 Upper = APInt::getSignedMaxValue(Width) + *C + 1; 2405 } else { 2406 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX]. 
2407 Lower = APInt::getSignedMinValue(Width) + *C; 2408 Upper = APInt::getSignedMaxValue(Width) + 1; 2409 } 2410 } 2411 } 2412 break; 2413 2414 case Instruction::And: 2415 if (match(BO.getOperand(1), m_APInt(C))) 2416 // 'and x, C' produces [0, C]. 2417 Upper = *C + 1; 2418 break; 2419 2420 case Instruction::Or: 2421 if (match(BO.getOperand(1), m_APInt(C))) 2422 // 'or x, C' produces [C, UINT_MAX]. 2423 Lower = *C; 2424 break; 2425 2426 case Instruction::AShr: 2427 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { 2428 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C]. 2429 Lower = APInt::getSignedMinValue(Width).ashr(*C); 2430 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1; 2431 } else if (match(BO.getOperand(0), m_APInt(C))) { 2432 unsigned ShiftAmount = Width - 1; 2433 if (*C != 0 && BO.isExact()) 2434 ShiftAmount = C->countTrailingZeros(); 2435 if (C->isNegative()) { 2436 // 'ashr C, x' produces [C, C >> (Width-1)] 2437 Lower = *C; 2438 Upper = C->ashr(ShiftAmount) + 1; 2439 } else { 2440 // 'ashr C, x' produces [C >> (Width-1), C] 2441 Lower = C->ashr(ShiftAmount); 2442 Upper = *C + 1; 2443 } 2444 } 2445 break; 2446 2447 case Instruction::LShr: 2448 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { 2449 // 'lshr x, C' produces [0, UINT_MAX >> C]. 2450 Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1; 2451 } else if (match(BO.getOperand(0), m_APInt(C))) { 2452 // 'lshr C, x' produces [C >> (Width-1), C]. 2453 unsigned ShiftAmount = Width - 1; 2454 if (*C != 0 && BO.isExact()) 2455 ShiftAmount = C->countTrailingZeros(); 2456 Lower = C->lshr(ShiftAmount); 2457 Upper = *C + 1; 2458 } 2459 break; 2460 2461 case Instruction::Shl: 2462 if (match(BO.getOperand(0), m_APInt(C))) { 2463 if (BO.hasNoUnsignedWrap()) { 2464 // 'shl nuw C, x' produces [C, C << CLZ(C)] 2465 Lower = *C; 2466 Upper = Lower.shl(Lower.countLeadingZeros()) + 1; 2467 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw? 2468 if (C->isNegative()) { 2469 // 'shl nsw C, x' produces [C << CLO(C)-1, C] 2470 unsigned ShiftAmount = C->countLeadingOnes() - 1; 2471 Lower = C->shl(ShiftAmount); 2472 Upper = *C + 1; 2473 } else { 2474 // 'shl nsw C, x' produces [C, C << CLZ(C)-1] 2475 unsigned ShiftAmount = C->countLeadingZeros() - 1; 2476 Lower = *C; 2477 Upper = C->shl(ShiftAmount) + 1; 2478 } 2479 } 2480 } 2481 break; 2482 2483 case Instruction::SDiv: 2484 if (match(BO.getOperand(1), m_APInt(C))) { 2485 APInt IntMin = APInt::getSignedMinValue(Width); 2486 APInt IntMax = APInt::getSignedMaxValue(Width); 2487 if (C->isAllOnesValue()) { 2488 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX] 2489 // where C != -1 and C != 0 and C != 1 2490 Lower = IntMin + 1; 2491 Upper = IntMax + 1; 2492 } else if (C->countLeadingZeros() < Width - 1) { 2493 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C] 2494 // where C != -1 and C != 0 and C != 1 2495 Lower = IntMin.sdiv(*C); 2496 Upper = IntMax.sdiv(*C); 2497 if (Lower.sgt(Upper)) 2498 std::swap(Lower, Upper); 2499 Upper = Upper + 1; 2500 assert(Upper != Lower && "Upper part of range has wrapped!"); 2501 } 2502 } else if (match(BO.getOperand(0), m_APInt(C))) { 2503 if (C->isMinSignedValue()) { 2504 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2]. 2505 Lower = *C; 2506 Upper = Lower.lshr(1) + 1; 2507 } else { 2508 // 'sdiv C, x' produces [-|C|, |C|]. 
2509         Upper = C->abs() + 1;
2510         Lower = (-Upper) + 1;
2511       }
2512     }
2513     break;
2514
2515   case Instruction::UDiv:
2516     if (match(BO.getOperand(1), m_APInt(C)) && *C != 0) {
2517       // 'udiv x, C' produces [0, UINT_MAX / C].
2518       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
2519     } else if (match(BO.getOperand(0), m_APInt(C))) {
2520       // 'udiv C, x' produces [0, C].
2521       Upper = *C + 1;
2522     }
2523     break;
2524
2525   case Instruction::SRem:
2526     if (match(BO.getOperand(1), m_APInt(C))) {
2527       // 'srem x, C' produces (-|C|, |C|).
2528       Upper = C->abs();
2529       Lower = (-Upper) + 1;
2530     }
2531     break;
2532
2533   case Instruction::URem:
2534     if (match(BO.getOperand(1), m_APInt(C)))
2535       // 'urem x, C' produces [0, C).
2536       Upper = *C;
2537     break;
2538
2539   default:
2540     break;
2541   }
2542 }
2543
2544 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2545                                        Value *RHS) {
2546   const APInt *C;
2547   if (!match(RHS, m_APInt(C)))
2548     return nullptr;
2549
2550   // Rule out tautological comparisons (e.g., ult 0 or uge 0).
2551   ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2552   if (RHS_CR.isEmptySet())
2553     return ConstantInt::getFalse(GetCompareTy(RHS));
2554   if (RHS_CR.isFullSet())
2555     return ConstantInt::getTrue(GetCompareTy(RHS));
2556
2557   // Find the range of possible values for binary operators.
2558   unsigned Width = C->getBitWidth();
2559   APInt Lower = APInt(Width, 0);
2560   APInt Upper = APInt(Width, 0);
2561   if (auto *BO = dyn_cast<BinaryOperator>(LHS))
2562     setLimitsForBinOp(*BO, Lower, Upper);
2563
2564   ConstantRange LHS_CR =
2565       Lower != Upper ? ConstantRange(Lower, Upper) : ConstantRange(Width, true);
2566
2567   if (auto *I = dyn_cast<Instruction>(LHS))
2568     if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
2569       LHS_CR = LHS_CR.intersectWith(getConstantRangeFromMetadata(*Ranges));
2570
2571   if (!LHS_CR.isFullSet()) {
2572     if (RHS_CR.contains(LHS_CR))
2573       return ConstantInt::getTrue(GetCompareTy(RHS));
2574     if (RHS_CR.inverse().contains(LHS_CR))
2575       return ConstantInt::getFalse(GetCompareTy(RHS));
2576   }
2577
2578   return nullptr;
2579 }
2580
2581 /// TODO: A large part of this logic is duplicated in InstCombine's
2582 /// foldICmpBinOp(). We should be able to share that and avoid the code
2583 /// duplication.
2584 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2585                                     Value *RHS, const SimplifyQuery &Q,
2586                                     unsigned MaxRecurse) {
2587   Type *ITy = GetCompareTy(LHS); // The return type.
2588
2589   BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2590   BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2591   if (MaxRecurse && (LBO || RBO)) {
2592     // Analyze the case when either LHS or RHS is an add instruction.
2593     Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2594     // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
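    // For illustration: with LHS = "add nsw %x, %y" and RHS = %x, the rule
    // below tries "icmp slt %y, 0" in place of "icmp slt (add nsw %x, %y), %x".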
2595 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false; 2596 if (LBO && LBO->getOpcode() == Instruction::Add) { 2597 A = LBO->getOperand(0); 2598 B = LBO->getOperand(1); 2599 NoLHSWrapProblem = 2600 ICmpInst::isEquality(Pred) || 2601 (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) || 2602 (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap()); 2603 } 2604 if (RBO && RBO->getOpcode() == Instruction::Add) { 2605 C = RBO->getOperand(0); 2606 D = RBO->getOperand(1); 2607 NoRHSWrapProblem = 2608 ICmpInst::isEquality(Pred) || 2609 (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) || 2610 (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap()); 2611 } 2612 2613 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 2614 if ((A == RHS || B == RHS) && NoLHSWrapProblem) 2615 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A, 2616 Constant::getNullValue(RHS->getType()), Q, 2617 MaxRecurse - 1)) 2618 return V; 2619 2620 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 2621 if ((C == LHS || D == LHS) && NoRHSWrapProblem) 2622 if (Value *V = 2623 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()), 2624 C == LHS ? D : C, Q, MaxRecurse - 1)) 2625 return V; 2626 2627 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow. 2628 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem && 2629 NoRHSWrapProblem) { 2630 // Determine Y and Z in the form icmp (X+Y), (X+Z). 2631 Value *Y, *Z; 2632 if (A == C) { 2633 // C + B == C + D -> B == D 2634 Y = B; 2635 Z = D; 2636 } else if (A == D) { 2637 // D + B == C + D -> B == C 2638 Y = B; 2639 Z = C; 2640 } else if (B == C) { 2641 // A + C == C + D -> A == D 2642 Y = A; 2643 Z = D; 2644 } else { 2645 assert(B == D); 2646 // A + D == C + D -> A == C 2647 Y = A; 2648 Z = C; 2649 } 2650 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1)) 2651 return V; 2652 } 2653 } 2654 2655 { 2656 Value *Y = nullptr; 2657 // icmp pred (or X, Y), X 2658 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) { 2659 if (Pred == ICmpInst::ICMP_ULT) 2660 return getFalse(ITy); 2661 if (Pred == ICmpInst::ICMP_UGE) 2662 return getTrue(ITy); 2663 2664 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) { 2665 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2666 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2667 if (RHSKnown.isNonNegative() && YKnown.isNegative()) 2668 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy); 2669 if (RHSKnown.isNegative() || YKnown.isNonNegative()) 2670 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy); 2671 } 2672 } 2673 // icmp pred X, (or X, Y) 2674 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) { 2675 if (Pred == ICmpInst::ICMP_ULE) 2676 return getTrue(ITy); 2677 if (Pred == ICmpInst::ICMP_UGT) 2678 return getFalse(ITy); 2679 2680 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) { 2681 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2682 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2683 if (LHSKnown.isNonNegative() && YKnown.isNegative()) 2684 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy); 2685 if (LHSKnown.isNegative() || YKnown.isNonNegative()) 2686 return Pred == ICmpInst::ICMP_SGT ? 
getFalse(ITy) : getTrue(ITy); 2687 } 2688 } 2689 } 2690 2691 // icmp pred (and X, Y), X 2692 if (LBO && match(LBO, m_CombineOr(m_And(m_Value(), m_Specific(RHS)), 2693 m_And(m_Specific(RHS), m_Value())))) { 2694 if (Pred == ICmpInst::ICMP_UGT) 2695 return getFalse(ITy); 2696 if (Pred == ICmpInst::ICMP_ULE) 2697 return getTrue(ITy); 2698 } 2699 // icmp pred X, (and X, Y) 2700 if (RBO && match(RBO, m_CombineOr(m_And(m_Value(), m_Specific(LHS)), 2701 m_And(m_Specific(LHS), m_Value())))) { 2702 if (Pred == ICmpInst::ICMP_UGE) 2703 return getTrue(ITy); 2704 if (Pred == ICmpInst::ICMP_ULT) 2705 return getFalse(ITy); 2706 } 2707 2708 // 0 - (zext X) pred C 2709 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) { 2710 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) { 2711 if (RHSC->getValue().isStrictlyPositive()) { 2712 if (Pred == ICmpInst::ICMP_SLT) 2713 return ConstantInt::getTrue(RHSC->getContext()); 2714 if (Pred == ICmpInst::ICMP_SGE) 2715 return ConstantInt::getFalse(RHSC->getContext()); 2716 if (Pred == ICmpInst::ICMP_EQ) 2717 return ConstantInt::getFalse(RHSC->getContext()); 2718 if (Pred == ICmpInst::ICMP_NE) 2719 return ConstantInt::getTrue(RHSC->getContext()); 2720 } 2721 if (RHSC->getValue().isNonNegative()) { 2722 if (Pred == ICmpInst::ICMP_SLE) 2723 return ConstantInt::getTrue(RHSC->getContext()); 2724 if (Pred == ICmpInst::ICMP_SGT) 2725 return ConstantInt::getFalse(RHSC->getContext()); 2726 } 2727 } 2728 } 2729 2730 // icmp pred (urem X, Y), Y 2731 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) { 2732 switch (Pred) { 2733 default: 2734 break; 2735 case ICmpInst::ICMP_SGT: 2736 case ICmpInst::ICMP_SGE: { 2737 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2738 if (!Known.isNonNegative()) 2739 break; 2740 LLVM_FALLTHROUGH; 2741 } 2742 case ICmpInst::ICMP_EQ: 2743 case ICmpInst::ICMP_UGT: 2744 case ICmpInst::ICMP_UGE: 2745 return getFalse(ITy); 2746 case ICmpInst::ICMP_SLT: 2747 case ICmpInst::ICMP_SLE: { 2748 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2749 if (!Known.isNonNegative()) 2750 break; 2751 LLVM_FALLTHROUGH; 2752 } 2753 case ICmpInst::ICMP_NE: 2754 case ICmpInst::ICMP_ULT: 2755 case ICmpInst::ICMP_ULE: 2756 return getTrue(ITy); 2757 } 2758 } 2759 2760 // icmp pred X, (urem Y, X) 2761 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) { 2762 switch (Pred) { 2763 default: 2764 break; 2765 case ICmpInst::ICMP_SGT: 2766 case ICmpInst::ICMP_SGE: { 2767 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2768 if (!Known.isNonNegative()) 2769 break; 2770 LLVM_FALLTHROUGH; 2771 } 2772 case ICmpInst::ICMP_NE: 2773 case ICmpInst::ICMP_UGT: 2774 case ICmpInst::ICMP_UGE: 2775 return getTrue(ITy); 2776 case ICmpInst::ICMP_SLT: 2777 case ICmpInst::ICMP_SLE: { 2778 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2779 if (!Known.isNonNegative()) 2780 break; 2781 LLVM_FALLTHROUGH; 2782 } 2783 case ICmpInst::ICMP_EQ: 2784 case ICmpInst::ICMP_ULT: 2785 case ICmpInst::ICMP_ULE: 2786 return getFalse(ITy); 2787 } 2788 } 2789 2790 // x >> y <=u x 2791 // x udiv y <=u x. 2792 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) || 2793 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) { 2794 // icmp pred (X op Y), X 2795 if (Pred == ICmpInst::ICMP_UGT) 2796 return getFalse(ITy); 2797 if (Pred == ICmpInst::ICMP_ULE) 2798 return getTrue(ITy); 2799 } 2800 2801 // x >=u x >> y 2802 // x >=u x udiv y. 
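  // e.g. "icmp uge %x, (lshr %x, %n)" -> true: an unsigned right shift or
  // unsigned division can never produce a value larger than its first
  // operand.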
2803 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) || 2804 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) { 2805 // icmp pred X, (X op Y) 2806 if (Pred == ICmpInst::ICMP_ULT) 2807 return getFalse(ITy); 2808 if (Pred == ICmpInst::ICMP_UGE) 2809 return getTrue(ITy); 2810 } 2811 2812 // handle: 2813 // CI2 << X == CI 2814 // CI2 << X != CI 2815 // 2816 // where CI2 is a power of 2 and CI isn't 2817 if (auto *CI = dyn_cast<ConstantInt>(RHS)) { 2818 const APInt *CI2Val, *CIVal = &CI->getValue(); 2819 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) && 2820 CI2Val->isPowerOf2()) { 2821 if (!CIVal->isPowerOf2()) { 2822 // CI2 << X can equal zero in some circumstances, 2823 // this simplification is unsafe if CI is zero. 2824 // 2825 // We know it is safe if: 2826 // - The shift is nsw, we can't shift out the one bit. 2827 // - The shift is nuw, we can't shift out the one bit. 2828 // - CI2 is one 2829 // - CI isn't zero 2830 if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() || 2831 *CI2Val == 1 || !CI->isZero()) { 2832 if (Pred == ICmpInst::ICMP_EQ) 2833 return ConstantInt::getFalse(RHS->getContext()); 2834 if (Pred == ICmpInst::ICMP_NE) 2835 return ConstantInt::getTrue(RHS->getContext()); 2836 } 2837 } 2838 if (CIVal->isSignMask() && *CI2Val == 1) { 2839 if (Pred == ICmpInst::ICMP_UGT) 2840 return ConstantInt::getFalse(RHS->getContext()); 2841 if (Pred == ICmpInst::ICMP_ULE) 2842 return ConstantInt::getTrue(RHS->getContext()); 2843 } 2844 } 2845 } 2846 2847 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() && 2848 LBO->getOperand(1) == RBO->getOperand(1)) { 2849 switch (LBO->getOpcode()) { 2850 default: 2851 break; 2852 case Instruction::UDiv: 2853 case Instruction::LShr: 2854 if (ICmpInst::isSigned(Pred) || !LBO->isExact() || !RBO->isExact()) 2855 break; 2856 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2857 RBO->getOperand(0), Q, MaxRecurse - 1)) 2858 return V; 2859 break; 2860 case Instruction::SDiv: 2861 if (!ICmpInst::isEquality(Pred) || !LBO->isExact() || !RBO->isExact()) 2862 break; 2863 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2864 RBO->getOperand(0), Q, MaxRecurse - 1)) 2865 return V; 2866 break; 2867 case Instruction::AShr: 2868 if (!LBO->isExact() || !RBO->isExact()) 2869 break; 2870 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2871 RBO->getOperand(0), Q, MaxRecurse - 1)) 2872 return V; 2873 break; 2874 case Instruction::Shl: { 2875 bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap(); 2876 bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap(); 2877 if (!NUW && !NSW) 2878 break; 2879 if (!NSW && ICmpInst::isSigned(Pred)) 2880 break; 2881 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2882 RBO->getOperand(0), Q, MaxRecurse - 1)) 2883 return V; 2884 break; 2885 } 2886 } 2887 } 2888 return nullptr; 2889 } 2890 2891 /// Simplify integer comparisons where at least one operand of the compare 2892 /// matches an integer min/max idiom. 2893 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS, 2894 Value *RHS, const SimplifyQuery &Q, 2895 unsigned MaxRecurse) { 2896 Type *ITy = GetCompareTy(LHS); // The return type. 2897 Value *A, *B; 2898 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE; 2899 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B". 2900 2901 // Signed variants on "max(a,b)>=a -> true". 
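  // e.g. "icmp sge (smax %a, %b), %a" -> true and
  // "icmp slt (smax %a, %b), %a" -> false, via the P/EqP analysis below.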
2902 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 2903 if (A != RHS) 2904 std::swap(A, B); // smax(A, B) pred A. 2905 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 2906 // We analyze this as smax(A, B) pred A. 2907 P = Pred; 2908 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) && 2909 (A == LHS || B == LHS)) { 2910 if (A != LHS) 2911 std::swap(A, B); // A pred smax(A, B). 2912 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 2913 // We analyze this as smax(A, B) swapped-pred A. 2914 P = CmpInst::getSwappedPredicate(Pred); 2915 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 2916 (A == RHS || B == RHS)) { 2917 if (A != RHS) 2918 std::swap(A, B); // smin(A, B) pred A. 2919 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 2920 // We analyze this as smax(-A, -B) swapped-pred -A. 2921 // Note that we do not need to actually form -A or -B thanks to EqP. 2922 P = CmpInst::getSwappedPredicate(Pred); 2923 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) && 2924 (A == LHS || B == LHS)) { 2925 if (A != LHS) 2926 std::swap(A, B); // A pred smin(A, B). 2927 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 2928 // We analyze this as smax(-A, -B) pred -A. 2929 // Note that we do not need to actually form -A or -B thanks to EqP. 2930 P = Pred; 2931 } 2932 if (P != CmpInst::BAD_ICMP_PREDICATE) { 2933 // Cases correspond to "max(A, B) p A". 2934 switch (P) { 2935 default: 2936 break; 2937 case CmpInst::ICMP_EQ: 2938 case CmpInst::ICMP_SLE: 2939 // Equivalent to "A EqP B". This may be the same as the condition tested 2940 // in the max/min; if so, we can just return that. 2941 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 2942 return V; 2943 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 2944 return V; 2945 // Otherwise, see if "A EqP B" simplifies. 2946 if (MaxRecurse) 2947 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 2948 return V; 2949 break; 2950 case CmpInst::ICMP_NE: 2951 case CmpInst::ICMP_SGT: { 2952 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 2953 // Equivalent to "A InvEqP B". This may be the same as the condition 2954 // tested in the max/min; if so, we can just return that. 2955 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 2956 return V; 2957 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 2958 return V; 2959 // Otherwise, see if "A InvEqP B" simplifies. 2960 if (MaxRecurse) 2961 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 2962 return V; 2963 break; 2964 } 2965 case CmpInst::ICMP_SGE: 2966 // Always true. 2967 return getTrue(ITy); 2968 case CmpInst::ICMP_SLT: 2969 // Always false. 2970 return getFalse(ITy); 2971 } 2972 } 2973 2974 // Unsigned variants on "max(a,b)>=a -> true". 2975 P = CmpInst::BAD_ICMP_PREDICATE; 2976 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 2977 if (A != RHS) 2978 std::swap(A, B); // umax(A, B) pred A. 2979 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 2980 // We analyze this as umax(A, B) pred A. 2981 P = Pred; 2982 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) && 2983 (A == LHS || B == LHS)) { 2984 if (A != LHS) 2985 std::swap(A, B); // A pred umax(A, B). 2986 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 2987 // We analyze this as umax(A, B) swapped-pred A. 
2988 P = CmpInst::getSwappedPredicate(Pred); 2989 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 2990 (A == RHS || B == RHS)) { 2991 if (A != RHS) 2992 std::swap(A, B); // umin(A, B) pred A. 2993 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 2994 // We analyze this as umax(-A, -B) swapped-pred -A. 2995 // Note that we do not need to actually form -A or -B thanks to EqP. 2996 P = CmpInst::getSwappedPredicate(Pred); 2997 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) && 2998 (A == LHS || B == LHS)) { 2999 if (A != LHS) 3000 std::swap(A, B); // A pred umin(A, B). 3001 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 3002 // We analyze this as umax(-A, -B) pred -A. 3003 // Note that we do not need to actually form -A or -B thanks to EqP. 3004 P = Pred; 3005 } 3006 if (P != CmpInst::BAD_ICMP_PREDICATE) { 3007 // Cases correspond to "max(A, B) p A". 3008 switch (P) { 3009 default: 3010 break; 3011 case CmpInst::ICMP_EQ: 3012 case CmpInst::ICMP_ULE: 3013 // Equivalent to "A EqP B". This may be the same as the condition tested 3014 // in the max/min; if so, we can just return that. 3015 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 3016 return V; 3017 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 3018 return V; 3019 // Otherwise, see if "A EqP B" simplifies. 3020 if (MaxRecurse) 3021 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 3022 return V; 3023 break; 3024 case CmpInst::ICMP_NE: 3025 case CmpInst::ICMP_UGT: { 3026 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 3027 // Equivalent to "A InvEqP B". This may be the same as the condition 3028 // tested in the max/min; if so, we can just return that. 3029 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 3030 return V; 3031 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 3032 return V; 3033 // Otherwise, see if "A InvEqP B" simplifies. 3034 if (MaxRecurse) 3035 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 3036 return V; 3037 break; 3038 } 3039 case CmpInst::ICMP_UGE: 3040 // Always true. 3041 return getTrue(ITy); 3042 case CmpInst::ICMP_ULT: 3043 // Always false. 3044 return getFalse(ITy); 3045 } 3046 } 3047 3048 // Variants on "max(x,y) >= min(x,z)". 3049 Value *C, *D; 3050 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && 3051 match(RHS, m_SMin(m_Value(C), m_Value(D))) && 3052 (A == C || A == D || B == C || B == D)) { 3053 // max(x, ?) pred min(x, ?). 3054 if (Pred == CmpInst::ICMP_SGE) 3055 // Always true. 3056 return getTrue(ITy); 3057 if (Pred == CmpInst::ICMP_SLT) 3058 // Always false. 3059 return getFalse(ITy); 3060 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 3061 match(RHS, m_SMax(m_Value(C), m_Value(D))) && 3062 (A == C || A == D || B == C || B == D)) { 3063 // min(x, ?) pred max(x, ?). 3064 if (Pred == CmpInst::ICMP_SLE) 3065 // Always true. 3066 return getTrue(ITy); 3067 if (Pred == CmpInst::ICMP_SGT) 3068 // Always false. 3069 return getFalse(ITy); 3070 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && 3071 match(RHS, m_UMin(m_Value(C), m_Value(D))) && 3072 (A == C || A == D || B == C || B == D)) { 3073 // max(x, ?) pred min(x, ?). 3074 if (Pred == CmpInst::ICMP_UGE) 3075 // Always true. 3076 return getTrue(ITy); 3077 if (Pred == CmpInst::ICMP_ULT) 3078 // Always false. 3079 return getFalse(ITy); 3080 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 3081 match(RHS, m_UMax(m_Value(C), m_Value(D))) && 3082 (A == C || A == D || B == C || B == D)) { 3083 // min(x, ?) 
pred max(x, ?). 3084 if (Pred == CmpInst::ICMP_ULE) 3085 // Always true. 3086 return getTrue(ITy); 3087 if (Pred == CmpInst::ICMP_UGT) 3088 // Always false. 3089 return getFalse(ITy); 3090 } 3091 3092 return nullptr; 3093 } 3094 3095 /// Given operands for an ICmpInst, see if we can fold the result. 3096 /// If not, this returns null. 3097 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3098 const SimplifyQuery &Q, unsigned MaxRecurse) { 3099 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3100 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!"); 3101 3102 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3103 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3104 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3105 3106 // If we have a constant, make sure it is on the RHS. 3107 std::swap(LHS, RHS); 3108 Pred = CmpInst::getSwappedPredicate(Pred); 3109 } 3110 3111 Type *ITy = GetCompareTy(LHS); // The return type. 3112 3113 // icmp X, X -> true/false 3114 // X icmp undef -> true/false. For example, icmp ugt %X, undef -> false 3115 // because X could be 0. 3116 if (LHS == RHS || isa<UndefValue>(RHS)) 3117 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred)); 3118 3119 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q)) 3120 return V; 3121 3122 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q)) 3123 return V; 3124 3125 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS)) 3126 return V; 3127 3128 // If both operands have range metadata, use the metadata 3129 // to simplify the comparison. 3130 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) { 3131 auto RHS_Instr = cast<Instruction>(RHS); 3132 auto LHS_Instr = cast<Instruction>(LHS); 3133 3134 if (RHS_Instr->getMetadata(LLVMContext::MD_range) && 3135 LHS_Instr->getMetadata(LLVMContext::MD_range)) { 3136 auto RHS_CR = getConstantRangeFromMetadata( 3137 *RHS_Instr->getMetadata(LLVMContext::MD_range)); 3138 auto LHS_CR = getConstantRangeFromMetadata( 3139 *LHS_Instr->getMetadata(LLVMContext::MD_range)); 3140 3141 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR); 3142 if (Satisfied_CR.contains(LHS_CR)) 3143 return ConstantInt::getTrue(RHS->getContext()); 3144 3145 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion( 3146 CmpInst::getInversePredicate(Pred), RHS_CR); 3147 if (InversedSatisfied_CR.contains(LHS_CR)) 3148 return ConstantInt::getFalse(RHS->getContext()); 3149 } 3150 } 3151 3152 // Compare of cast, for example (zext X) != 0 -> X != 0 3153 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) { 3154 Instruction *LI = cast<CastInst>(LHS); 3155 Value *SrcOp = LI->getOperand(0); 3156 Type *SrcTy = SrcOp->getType(); 3157 Type *DstTy = LI->getType(); 3158 3159 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input 3160 // if the integer type is the same size as the pointer type. 3161 if (MaxRecurse && isa<PtrToIntInst>(LI) && 3162 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) { 3163 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 3164 // Transfer the cast to the constant. 3165 if (Value *V = SimplifyICmpInst(Pred, SrcOp, 3166 ConstantExpr::getIntToPtr(RHSC, SrcTy), 3167 Q, MaxRecurse-1)) 3168 return V; 3169 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) { 3170 if (RI->getOperand(0)->getType() == SrcTy) 3171 // Compare without the cast. 
3172 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3173 Q, MaxRecurse-1))
3174 return V;
3175 }
3176 }
3177
3178 if (isa<ZExtInst>(LHS)) {
3179 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3180 // same type.
3181 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3182 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3183 // Compare X and Y. Note that signed predicates become unsigned.
3184 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3185 SrcOp, RI->getOperand(0), Q,
3186 MaxRecurse-1))
3187 return V;
3188 }
3189 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3190 // too. If not, then try to deduce the result of the comparison.
3191 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3192 // Compute the constant that would happen if we truncated to SrcTy then
3193 // reextended to DstTy.
3194 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3195 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3196
3197 // If the re-extended constant didn't change then this is effectively
3198 // also a case of comparing two zero-extended values.
3199 if (RExt == CI && MaxRecurse)
3200 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3201 SrcOp, Trunc, Q, MaxRecurse-1))
3202 return V;
3203
3204 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3205 // there. Use this to work out the result of the comparison.
3206 if (RExt != CI) {
3207 switch (Pred) {
3208 default: llvm_unreachable("Unknown ICmp predicate!");
3209 // LHS <u RHS.
3210 case ICmpInst::ICMP_EQ:
3211 case ICmpInst::ICMP_UGT:
3212 case ICmpInst::ICMP_UGE:
3213 return ConstantInt::getFalse(CI->getContext());
3214
3215 case ICmpInst::ICMP_NE:
3216 case ICmpInst::ICMP_ULT:
3217 case ICmpInst::ICMP_ULE:
3218 return ConstantInt::getTrue(CI->getContext());
3219
3220 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3221 // is non-negative then LHS <s RHS.
3222 case ICmpInst::ICMP_SGT:
3223 case ICmpInst::ICMP_SGE:
3224 return CI->getValue().isNegative() ?
3225 ConstantInt::getTrue(CI->getContext()) :
3226 ConstantInt::getFalse(CI->getContext());
3227
3228 case ICmpInst::ICMP_SLT:
3229 case ICmpInst::ICMP_SLE:
3230 return CI->getValue().isNegative() ?
3231 ConstantInt::getFalse(CI->getContext()) :
3232 ConstantInt::getTrue(CI->getContext());
3233 }
3234 }
3235 }
3236 }
3237
3238 if (isa<SExtInst>(LHS)) {
3239 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3240 // same type.
3241 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3242 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3243 // Compare X and Y. Note that the predicate does not change.
3244 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3245 Q, MaxRecurse-1))
3246 return V;
3247 }
3248 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3249 // too. If not, then try to deduce the result of the comparison.
3250 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3251 // Compute the constant that would happen if we truncated to SrcTy then
3252 // reextended to DstTy.
3253 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3254 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3255
3256 // If the re-extended constant didn't change then this is effectively
3257 // also a case of comparing two sign-extended values.
3258 if (RExt == CI && MaxRecurse)
3259 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3260 return V;
3261
3262 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3263 // bits there. Use this to work out the result of the comparison.
3264 if (RExt != CI) {
3265 switch (Pred) {
3266 default: llvm_unreachable("Unknown ICmp predicate!");
3267 case ICmpInst::ICMP_EQ:
3268 return ConstantInt::getFalse(CI->getContext());
3269 case ICmpInst::ICMP_NE:
3270 return ConstantInt::getTrue(CI->getContext());
3271
3272 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
3273 // LHS >s RHS.
3274 case ICmpInst::ICMP_SGT:
3275 case ICmpInst::ICMP_SGE:
3276 return CI->getValue().isNegative() ?
3277 ConstantInt::getTrue(CI->getContext()) :
3278 ConstantInt::getFalse(CI->getContext());
3279 case ICmpInst::ICMP_SLT:
3280 case ICmpInst::ICMP_SLE:
3281 return CI->getValue().isNegative() ?
3282 ConstantInt::getFalse(CI->getContext()) :
3283 ConstantInt::getTrue(CI->getContext());
3284
3285 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
3286 // LHS >u RHS.
3287 case ICmpInst::ICMP_UGT:
3288 case ICmpInst::ICMP_UGE:
3289 // Comparison is true iff the LHS <s 0.
3290 if (MaxRecurse)
3291 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3292 Constant::getNullValue(SrcTy),
3293 Q, MaxRecurse-1))
3294 return V;
3295 break;
3296 case ICmpInst::ICMP_ULT:
3297 case ICmpInst::ICMP_ULE:
3298 // Comparison is true iff the LHS >=s 0.
3299 if (MaxRecurse)
3300 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3301 Constant::getNullValue(SrcTy),
3302 Q, MaxRecurse-1))
3303 return V;
3304 break;
3305 }
3306 }
3307 }
3308 }
3309 }
3310
3311 // icmp eq|ne X, Y -> false|true if X != Y
3312 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
3313 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) {
3314 LLVMContext &Ctx = LHS->getType()->getContext();
3315 return Pred == ICmpInst::ICMP_NE ?
3316 ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
3317 }
3318
3319 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3320 return V;
3321
3322 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3323 return V;
3324
3325 // Simplify comparisons of related pointers using a powerful, recursive
3326 // GEP-walk when we have target data available.
3327 if (LHS->getType()->isPointerTy())
3328 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI, LHS, RHS))
3329 return C;
3330 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3331 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3332 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3333 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3334 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3335 Q.DL.getTypeSizeInBits(CRHS->getType()))
3336 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI,
3337 CLHS->getPointerOperand(),
3338 CRHS->getPointerOperand()))
3339 return C;
3340
3341 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3342 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3343 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3344 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3345 (ICmpInst::isEquality(Pred) ||
3346 (GLHS->isInBounds() && GRHS->isInBounds() &&
3347 Pred == ICmpInst::getSignedPredicate(Pred)))) {
3348 // The bases are equal and the indices are constant.
Build a constant 3349 // expression GEP with the same indices and a null base pointer to see 3350 // what constant folding can make out of it. 3351 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType()); 3352 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end()); 3353 Constant *NewLHS = ConstantExpr::getGetElementPtr( 3354 GLHS->getSourceElementType(), Null, IndicesLHS); 3355 3356 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end()); 3357 Constant *NewRHS = ConstantExpr::getGetElementPtr( 3358 GLHS->getSourceElementType(), Null, IndicesRHS); 3359 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS); 3360 } 3361 } 3362 } 3363 3364 // If a bit is known to be zero for A and known to be one for B, 3365 // then A and B cannot be equal. 3366 if (ICmpInst::isEquality(Pred)) { 3367 const APInt *RHSVal; 3368 if (match(RHS, m_APInt(RHSVal))) { 3369 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT); 3370 if (LHSKnown.Zero.intersects(*RHSVal) || 3371 !LHSKnown.One.isSubsetOf(*RHSVal)) 3372 return Pred == ICmpInst::ICMP_EQ ? ConstantInt::getFalse(ITy) 3373 : ConstantInt::getTrue(ITy); 3374 } 3375 } 3376 3377 // If the comparison is with the result of a select instruction, check whether 3378 // comparing with either branch of the select always yields the same value. 3379 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3380 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3381 return V; 3382 3383 // If the comparison is with the result of a phi instruction, check whether 3384 // doing the compare with each incoming phi value yields a common result. 3385 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 3386 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 3387 return V; 3388 3389 return nullptr; 3390 } 3391 3392 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3393 const SimplifyQuery &Q) { 3394 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit); 3395 } 3396 3397 /// Given operands for an FCmpInst, see if we can fold the result. 3398 /// If not, this returns null. 3399 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3400 FastMathFlags FMF, const SimplifyQuery &Q, 3401 unsigned MaxRecurse) { 3402 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3403 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!"); 3404 3405 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3406 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3407 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3408 3409 // If we have a constant, make sure it is on the RHS. 3410 std::swap(LHS, RHS); 3411 Pred = CmpInst::getSwappedPredicate(Pred); 3412 } 3413 3414 // Fold trivial predicates. 3415 Type *RetTy = GetCompareTy(LHS); 3416 if (Pred == FCmpInst::FCMP_FALSE) 3417 return getFalse(RetTy); 3418 if (Pred == FCmpInst::FCMP_TRUE) 3419 return getTrue(RetTy); 3420 3421 // UNO/ORD predicates can be trivially folded if NaNs are ignored. 3422 if (FMF.noNaNs()) { 3423 if (Pred == FCmpInst::FCMP_UNO) 3424 return getFalse(RetTy); 3425 if (Pred == FCmpInst::FCMP_ORD) 3426 return getTrue(RetTy); 3427 } 3428 3429 // fcmp pred x, undef and fcmp pred undef, x 3430 // fold to true if unordered, false if ordered 3431 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) { 3432 // Choosing NaN for the undef will always make unordered comparison succeed 3433 // and ordered comparison fail. 
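// For example (illustrative): "fcmp ult double %x, undef" folds to true,
// since undef may be NaN and the unordered comparison then holds, while
// "fcmp olt double %x, undef" folds to false for the same reason.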
3434 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3435 }
3436
3437 // fcmp x, x -> true/false. Not all compares are foldable.
3438 if (LHS == RHS) {
3439 if (CmpInst::isTrueWhenEqual(Pred))
3440 return getTrue(RetTy);
3441 if (CmpInst::isFalseWhenEqual(Pred))
3442 return getFalse(RetTy);
3443 }
3444
3445 // Handle fcmp with constant RHS
3446 const ConstantFP *CFP = nullptr;
3447 if (const auto *RHSC = dyn_cast<Constant>(RHS)) {
3448 if (RHS->getType()->isVectorTy())
3449 CFP = dyn_cast_or_null<ConstantFP>(RHSC->getSplatValue());
3450 else
3451 CFP = dyn_cast<ConstantFP>(RHSC);
3452 }
3453 if (CFP) {
3454 // If the constant is a NaN, see if we can fold the comparison based on it.
3455 if (CFP->getValueAPF().isNaN()) {
3456 if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo"
3457 return getFalse(RetTy);
3458 assert(FCmpInst::isUnordered(Pred) &&
3459 "Comparison must be either ordered or unordered!");
3460 // True if unordered.
3461 return getTrue(RetTy);
3462 }
3463 // Check whether the constant is an infinity.
3464 if (CFP->getValueAPF().isInfinity()) {
3465 if (CFP->getValueAPF().isNegative()) {
3466 switch (Pred) {
3467 case FCmpInst::FCMP_OLT:
3468 // No value is ordered and less than negative infinity.
3469 return getFalse(RetTy);
3470 case FCmpInst::FCMP_UGE:
3471 // All values are unordered with or at least negative infinity.
3472 return getTrue(RetTy);
3473 default:
3474 break;
3475 }
3476 } else {
3477 switch (Pred) {
3478 case FCmpInst::FCMP_OGT:
3479 // No value is ordered and greater than infinity.
3480 return getFalse(RetTy);
3481 case FCmpInst::FCMP_ULE:
3482 // All values are unordered with or at most infinity.
3483 return getTrue(RetTy);
3484 default:
3485 break;
3486 }
3487 }
3488 }
3489 if (CFP->getValueAPF().isZero()) {
3490 switch (Pred) {
3491 case FCmpInst::FCMP_UGE:
3492 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3493 return getTrue(RetTy);
3494 break;
3495 case FCmpInst::FCMP_OLT:
3496 // X < 0
3497 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3498 return getFalse(RetTy);
3499 break;
3500 default:
3501 break;
3502 }
3503 }
3504 }
3505
3506 // If the comparison is with the result of a select instruction, check whether
3507 // comparing with either branch of the select always yields the same value.
3508 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3509 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3510 return V;
3511
3512 // If the comparison is with the result of a phi instruction, check whether
3513 // doing the compare with each incoming phi value yields a common result.
3514 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3515 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3516 return V;
3517
3518 return nullptr;
3519 }
3520
3521 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3522 FastMathFlags FMF, const SimplifyQuery &Q) {
3523 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
3524 }
3525
3526 /// See if V simplifies when its operand Op is replaced with RepOp.
3527 static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3528 const SimplifyQuery &Q,
3529 unsigned MaxRecurse) {
3530 // Trivial replacement.
3531 if (V == Op)
3532 return RepOp;
3533
3534 // We cannot replace a constant, and shouldn't even try.
3535 if (isa<Constant>(Op))
3536 return nullptr;
3537
3538 auto *I = dyn_cast<Instruction>(V);
3539 if (!I)
3540 return nullptr;
3541
3542 // If this is a binary operator, try to simplify it with the replaced op.
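// For example (illustrative): if V is "add i32 %x, 1" (no nsw/nuw) and we
// replace Op == %x with the constant 0, the recursive query below becomes
// SimplifyBinOp(Add, 0, 1), which constant folds to 1.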
3543 if (auto *B = dyn_cast<BinaryOperator>(I)) { 3544 // Consider: 3545 // %cmp = icmp eq i32 %x, 2147483647 3546 // %add = add nsw i32 %x, 1 3547 // %sel = select i1 %cmp, i32 -2147483648, i32 %add 3548 // 3549 // We can't replace %sel with %add unless we strip away the flags. 3550 if (isa<OverflowingBinaryOperator>(B)) 3551 if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap()) 3552 return nullptr; 3553 if (isa<PossiblyExactOperator>(B)) 3554 if (B->isExact()) 3555 return nullptr; 3556 3557 if (MaxRecurse) { 3558 if (B->getOperand(0) == Op) 3559 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q, 3560 MaxRecurse - 1); 3561 if (B->getOperand(1) == Op) 3562 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q, 3563 MaxRecurse - 1); 3564 } 3565 } 3566 3567 // Same for CmpInsts. 3568 if (CmpInst *C = dyn_cast<CmpInst>(I)) { 3569 if (MaxRecurse) { 3570 if (C->getOperand(0) == Op) 3571 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q, 3572 MaxRecurse - 1); 3573 if (C->getOperand(1) == Op) 3574 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q, 3575 MaxRecurse - 1); 3576 } 3577 } 3578 3579 // TODO: We could hand off more cases to instsimplify here. 3580 3581 // If all operands are constant after substituting Op for RepOp then we can 3582 // constant fold the instruction. 3583 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) { 3584 // Build a list of all constant operands. 3585 SmallVector<Constant *, 8> ConstOps; 3586 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 3587 if (I->getOperand(i) == Op) 3588 ConstOps.push_back(CRepOp); 3589 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i))) 3590 ConstOps.push_back(COp); 3591 else 3592 break; 3593 } 3594 3595 // All operands were constants, fold it. 3596 if (ConstOps.size() == I->getNumOperands()) { 3597 if (CmpInst *C = dyn_cast<CmpInst>(I)) 3598 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0], 3599 ConstOps[1], Q.DL, Q.TLI); 3600 3601 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 3602 if (!LI->isVolatile()) 3603 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL); 3604 3605 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI); 3606 } 3607 } 3608 3609 return nullptr; 3610 } 3611 3612 /// Try to simplify a select instruction when its condition operand is an 3613 /// integer comparison where one operand of the compare is a constant. 3614 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, 3615 const APInt *Y, bool TrueWhenUnset) { 3616 const APInt *C; 3617 3618 // (X & Y) == 0 ? X & ~Y : X --> X 3619 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y 3620 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) && 3621 *Y == ~*C) 3622 return TrueWhenUnset ? FalseVal : TrueVal; 3623 3624 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y 3625 // (X & Y) != 0 ? X : X & ~Y --> X 3626 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) && 3627 *Y == ~*C) 3628 return TrueWhenUnset ? FalseVal : TrueVal; 3629 3630 if (Y->isPowerOf2()) { 3631 // (X & Y) == 0 ? X | Y : X --> X | Y 3632 // (X & Y) != 0 ? X | Y : X --> X 3633 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) && 3634 *Y == *C) 3635 return TrueWhenUnset ? TrueVal : FalseVal; 3636 3637 // (X & Y) == 0 ? X : X | Y --> X 3638 // (X & Y) != 0 ? X : X | Y --> X | Y 3639 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) && 3640 *Y == *C) 3641 return TrueWhenUnset ? 
TrueVal : FalseVal; 3642 } 3643 3644 return nullptr; 3645 } 3646 3647 /// An alternative way to test if a bit is set or not uses sgt/slt instead of 3648 /// eq/ne. 3649 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *TrueVal, 3650 Value *FalseVal, 3651 bool TrueWhenUnset) { 3652 unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits(); 3653 if (!BitWidth) 3654 return nullptr; 3655 3656 APInt MinSignedValue; 3657 Value *X; 3658 if (match(CmpLHS, m_Trunc(m_Value(X))) && (X == TrueVal || X == FalseVal)) { 3659 // icmp slt (trunc X), 0 <--> icmp ne (and X, C), 0 3660 // icmp sgt (trunc X), -1 <--> icmp eq (and X, C), 0 3661 unsigned DestSize = CmpLHS->getType()->getScalarSizeInBits(); 3662 MinSignedValue = APInt::getSignedMinValue(DestSize).zext(BitWidth); 3663 } else { 3664 // icmp slt X, 0 <--> icmp ne (and X, C), 0 3665 // icmp sgt X, -1 <--> icmp eq (and X, C), 0 3666 X = CmpLHS; 3667 MinSignedValue = APInt::getSignedMinValue(BitWidth); 3668 } 3669 3670 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, &MinSignedValue, 3671 TrueWhenUnset)) 3672 return V; 3673 3674 return nullptr; 3675 } 3676 3677 /// Try to simplify a select instruction when its condition operand is an 3678 /// integer comparison. 3679 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, 3680 Value *FalseVal, const SimplifyQuery &Q, 3681 unsigned MaxRecurse) { 3682 ICmpInst::Predicate Pred; 3683 Value *CmpLHS, *CmpRHS; 3684 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)))) 3685 return nullptr; 3686 3687 // FIXME: This code is nearly duplicated in InstCombine. Using/refactoring 3688 // decomposeBitTestICmp() might help. 3689 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) { 3690 Value *X; 3691 const APInt *Y; 3692 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y)))) 3693 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y, 3694 Pred == ICmpInst::ICMP_EQ)) 3695 return V; 3696 } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) { 3697 // Comparing signed-less-than 0 checks if the sign bit is set. 3698 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal, 3699 false)) 3700 return V; 3701 } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) { 3702 // Comparing signed-greater-than -1 checks if the sign bit is not set. 3703 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, TrueVal, FalseVal, 3704 true)) 3705 return V; 3706 } 3707 3708 if (CondVal->hasOneUse()) { 3709 const APInt *C; 3710 if (match(CmpRHS, m_APInt(C))) { 3711 // X < MIN ? T : F --> F 3712 if (Pred == ICmpInst::ICMP_SLT && C->isMinSignedValue()) 3713 return FalseVal; 3714 // X < MIN ? T : F --> F 3715 if (Pred == ICmpInst::ICMP_ULT && C->isMinValue()) 3716 return FalseVal; 3717 // X > MAX ? T : F --> F 3718 if (Pred == ICmpInst::ICMP_SGT && C->isMaxSignedValue()) 3719 return FalseVal; 3720 // X > MAX ? T : F --> F 3721 if (Pred == ICmpInst::ICMP_UGT && C->isMaxValue()) 3722 return FalseVal; 3723 } 3724 } 3725 3726 // If we have an equality comparison, then we know the value in one of the 3727 // arms of the select. See if substituting this value into the arm and 3728 // simplifying the result yields the same value as the other arm. 
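// For example (illustrative): "select (icmp eq i32 %x, 0), i32 0, i32 %x"
// folds to %x, because substituting 0 for %x in the false arm yields the
// true arm, so both arms agree whenever the condition is true.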
3729 if (Pred == ICmpInst::ICMP_EQ) {
3730 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3731 TrueVal ||
3732 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3733 TrueVal)
3734 return FalseVal;
3735 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3736 FalseVal ||
3737 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3738 FalseVal)
3739 return FalseVal;
3740 } else if (Pred == ICmpInst::ICMP_NE) {
3741 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3742 FalseVal ||
3743 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3744 FalseVal)
3745 return TrueVal;
3746 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) ==
3747 TrueVal ||
3748 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) ==
3749 TrueVal)
3750 return TrueVal;
3751 }
3752
3753 return nullptr;
3754 }
3755
3756 /// Given operands for a SelectInst, see if we can fold the result.
3757 /// If not, this returns null.
3758 static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
3759 Value *FalseVal, const SimplifyQuery &Q,
3760 unsigned MaxRecurse) {
3761 // select true, X, Y -> X
3762 // select false, X, Y -> Y
3763 if (Constant *CB = dyn_cast<Constant>(CondVal)) {
3764 if (CB->isAllOnesValue())
3765 return TrueVal;
3766 if (CB->isNullValue())
3767 return FalseVal;
3768 }
3769
3770 // select C, X, X -> X
3771 if (TrueVal == FalseVal)
3772 return TrueVal;
3773
3774 if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
3775 if (isa<Constant>(FalseVal))
3776 return FalseVal;
3777 return TrueVal;
3778 }
3779 if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
3780 return FalseVal;
3781 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
3782 return TrueVal;
3783
3784 if (Value *V =
3785 simplifySelectWithICmpCond(CondVal, TrueVal, FalseVal, Q, MaxRecurse))
3786 return V;
3787
3788 return nullptr;
3789 }
3790
3791 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
3792 const SimplifyQuery &Q) {
3793 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
3794 }
3795
3796 /// Given operands for a GetElementPtrInst, see if we can fold the result.
3797 /// If not, this returns null.
3798 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3799 const SimplifyQuery &Q, unsigned) {
3800 // The address space of the GEP pointer operand.
3801 unsigned AS =
3802 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
3803
3804 // getelementptr P -> P.
3805 if (Ops.size() == 1)
3806 return Ops[0];
3807
3808 // Compute the (pointer) type returned by the GEP instruction.
3809 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
3810 Type *GEPTy = PointerType::get(LastType, AS);
3811 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
3812 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3813 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType()))
3814 GEPTy = VectorType::get(GEPTy, VT->getNumElements());
3815
3816 if (isa<UndefValue>(Ops[0]))
3817 return UndefValue::get(GEPTy);
3818
3819 if (Ops.size() == 2) {
3820 // getelementptr P, 0 -> P.
3821 if (match(Ops[1], m_Zero()))
3822 return Ops[0];
3823
3824 Type *Ty = SrcTy;
3825 if (Ty->isSized()) {
3826 Value *P;
3827 uint64_t C;
3828 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
3829 // getelementptr P, N -> P if P points to a type of zero size.
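// For example (illustrative): "getelementptr {}, {}* %p, i64 %n" folds to
// %p, because an empty struct occupies zero bytes regardless of the index.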
3830 if (TyAllocSize == 0)
3831 return Ops[0];
3832
3833 // The following transforms are only safe if the ptrtoint cast
3834 // doesn't truncate the pointers.
3835 if (Ops[1]->getType()->getScalarSizeInBits() ==
3836 Q.DL.getPointerSizeInBits(AS)) {
3837 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
3838 if (match(P, m_Zero()))
3839 return Constant::getNullValue(GEPTy);
3840 Value *Temp;
3841 if (match(P, m_PtrToInt(m_Value(Temp))))
3842 if (Temp->getType() == GEPTy)
3843 return Temp;
3844 return nullptr;
3845 };
3846
3847 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
3848 if (TyAllocSize == 1 &&
3849 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0])))))
3850 if (Value *R = PtrToIntOrZero(P))
3851 return R;
3852
3853 // getelementptr V, (ashr (sub P, V), C) -> P
3854 // if P points to a type of size 1 << C.
3855 if (match(Ops[1],
3856 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3857 m_ConstantInt(C))) &&
3858 TyAllocSize == 1ULL << C)
3859 if (Value *R = PtrToIntOrZero(P))
3860 return R;
3861
3862 // getelementptr V, (sdiv (sub P, V), C) -> P
3863 // if P points to a type of size C.
3864 if (match(Ops[1],
3865 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))),
3866 m_SpecificInt(TyAllocSize))))
3867 if (Value *R = PtrToIntOrZero(P))
3868 return R;
3869 }
3870 }
3871 }
3872
3873 if (Q.DL.getTypeAllocSize(LastType) == 1 &&
3874 all_of(Ops.slice(1).drop_back(1),
3875 [](Value *Idx) { return match(Idx, m_Zero()); })) {
3876 unsigned PtrWidth =
3877 Q.DL.getPointerSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
3878 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == PtrWidth) {
3879 APInt BasePtrOffset(PtrWidth, 0);
3880 Value *StrippedBasePtr =
3881 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
3882 BasePtrOffset);
3883
3884 // gep (gep V, C), (sub 0, V) -> C
3885 if (match(Ops.back(),
3886 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) {
3887 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
3888 return ConstantExpr::getIntToPtr(CI, GEPTy);
3889 }
3890 // gep (gep V, C), (xor V, -1) -> C-1
3891 if (match(Ops.back(),
3892 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) {
3893 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
3894 return ConstantExpr::getIntToPtr(CI, GEPTy);
3895 }
3896 }
3897 }
3898
3899 // Check to see if this is constant foldable.
3900 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3901 if (!isa<Constant>(Ops[i]))
3902 return nullptr;
3903
3904 return ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
3905 Ops.slice(1));
3906 }
3907
3908 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
3909 const SimplifyQuery &Q) {
3910 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit);
3911 }
3912
3913 /// Given operands for an InsertValueInst, see if we can fold the result.
3914 /// If not, this returns null.
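/// For example (illustrative): "insertvalue {i32, i32} %agg, i32 undef, 0"
/// folds back to %agg, as does reinserting a value that was just extracted
/// from the same position of the same aggregate.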
3915 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
3916 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q,
3917 unsigned) {
3918 if (Constant *CAgg = dyn_cast<Constant>(Agg))
3919 if (Constant *CVal = dyn_cast<Constant>(Val))
3920 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
3921
3922 // insertvalue x, undef, n -> x
3923 if (match(Val, m_Undef()))
3924 return Agg;
3925
3926 // insertvalue x, (extractvalue y, n), n
3927 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
3928 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
3929 EV->getIndices() == Idxs) {
3930 // insertvalue undef, (extractvalue y, n), n -> y
3931 if (match(Agg, m_Undef()))
3932 return EV->getAggregateOperand();
3933
3934 // insertvalue y, (extractvalue y, n), n -> y
3935 if (Agg == EV->getAggregateOperand())
3936 return Agg;
3937 }
3938
3939 return nullptr;
3940 }
3941
3942 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
3943 ArrayRef<unsigned> Idxs,
3944 const SimplifyQuery &Q) {
3945 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
3946 }
3947
3948 /// Given operands for an ExtractValueInst, see if we can fold the result.
3949 /// If not, this returns null.
3950 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3951 const SimplifyQuery &, unsigned) {
3952 if (auto *CAgg = dyn_cast<Constant>(Agg))
3953 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
3954
3955 // extractvalue (insertvalue y, elt, n), n -> elt
3956 unsigned NumIdxs = Idxs.size();
3957 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
3958 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
3959 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
3960 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
3961 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
3962 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
3963 Idxs.slice(0, NumCommonIdxs)) {
3964 if (NumIdxs == NumInsertValueIdxs)
3965 return IVI->getInsertedValueOperand();
3966 break;
3967 }
3968 }
3969
3970 return nullptr;
3971 }
3972
3973 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
3974 const SimplifyQuery &Q) {
3975 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
3976 }
3977
3978 /// Given operands for an ExtractElementInst, see if we can fold the result.
3979 /// If not, this returns null.
3980 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &,
3981 unsigned) {
3982 if (auto *CVec = dyn_cast<Constant>(Vec)) {
3983 if (auto *CIdx = dyn_cast<Constant>(Idx))
3984 return ConstantFoldExtractElementInstruction(CVec, CIdx);
3985
3986 // The index is not relevant if our vector is a splat.
3987 if (auto *Splat = CVec->getSplatValue())
3988 return Splat;
3989
3990 if (isa<UndefValue>(Vec))
3991 return UndefValue::get(Vec->getType()->getVectorElementType());
3992 }
3993
3994 // If extracting a specified index from the vector, see if we can recursively
3995 // find a previously computed scalar that was inserted into the vector.
3996 if (auto *IdxC = dyn_cast<ConstantInt>(Idx))
3997 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
3998 return Elt;
3999
4000 return nullptr;
4001 }
4002
4003 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
4004 const SimplifyQuery &Q) {
4005 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
4006 }
4007
4008 /// See if we can fold the given phi. If not, returns null.
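/// For example (illustrative): a phi with incoming values %v, %v and undef
/// folds to %v, provided %v dominates the phi's block (checked via
/// ValueDominatesPHI above).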
4009 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) { 4010 // If all of the PHI's incoming values are the same then replace the PHI node 4011 // with the common value. 4012 Value *CommonValue = nullptr; 4013 bool HasUndefInput = false; 4014 for (Value *Incoming : PN->incoming_values()) { 4015 // If the incoming value is the phi node itself, it can safely be skipped. 4016 if (Incoming == PN) continue; 4017 if (isa<UndefValue>(Incoming)) { 4018 // Remember that we saw an undef value, but otherwise ignore them. 4019 HasUndefInput = true; 4020 continue; 4021 } 4022 if (CommonValue && Incoming != CommonValue) 4023 return nullptr; // Not the same, bail out. 4024 CommonValue = Incoming; 4025 } 4026 4027 // If CommonValue is null then all of the incoming values were either undef or 4028 // equal to the phi node itself. 4029 if (!CommonValue) 4030 return UndefValue::get(PN->getType()); 4031 4032 // If we have a PHI node like phi(X, undef, X), where X is defined by some 4033 // instruction, we cannot return X as the result of the PHI node unless it 4034 // dominates the PHI block. 4035 if (HasUndefInput) 4036 return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr; 4037 4038 return CommonValue; 4039 } 4040 4041 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op, 4042 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) { 4043 if (auto *C = dyn_cast<Constant>(Op)) 4044 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL); 4045 4046 if (auto *CI = dyn_cast<CastInst>(Op)) { 4047 auto *Src = CI->getOperand(0); 4048 Type *SrcTy = Src->getType(); 4049 Type *MidTy = CI->getType(); 4050 Type *DstTy = Ty; 4051 if (Src->getType() == Ty) { 4052 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode()); 4053 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc); 4054 Type *SrcIntPtrTy = 4055 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr; 4056 Type *MidIntPtrTy = 4057 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr; 4058 Type *DstIntPtrTy = 4059 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr; 4060 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy, 4061 SrcIntPtrTy, MidIntPtrTy, 4062 DstIntPtrTy) == Instruction::BitCast) 4063 return Src; 4064 } 4065 } 4066 4067 // bitcast x -> x 4068 if (CastOpc == Instruction::BitCast) 4069 if (Op->getType() == Ty) 4070 return Op; 4071 4072 return nullptr; 4073 } 4074 4075 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, 4076 const SimplifyQuery &Q) { 4077 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit); 4078 } 4079 4080 /// For the given destination element of a shuffle, peek through shuffles to 4081 /// match a root vector source operand that contains that element in the same 4082 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s). 4083 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, 4084 int MaskVal, Value *RootVec, 4085 unsigned MaxRecurse) { 4086 if (!MaxRecurse--) 4087 return nullptr; 4088 4089 // Bail out if any mask value is undefined. That kind of shuffle may be 4090 // simplified further based on demanded bits or other folds. 4091 if (MaskVal == -1) 4092 return nullptr; 4093 4094 // The mask value chooses which source operand we need to look at next. 
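// For example (illustrative): with two 4-element input vectors, mask value 5
// selects element 1 of Op1, while mask value 2 selects element 2 of Op0.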
4095 int InVecNumElts = Op0->getType()->getVectorNumElements(); 4096 int RootElt = MaskVal; 4097 Value *SourceOp = Op0; 4098 if (MaskVal >= InVecNumElts) { 4099 RootElt = MaskVal - InVecNumElts; 4100 SourceOp = Op1; 4101 } 4102 4103 // If the source operand is a shuffle itself, look through it to find the 4104 // matching root vector. 4105 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) { 4106 return foldIdentityShuffles( 4107 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1), 4108 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse); 4109 } 4110 4111 // TODO: Look through bitcasts? What if the bitcast changes the vector element 4112 // size? 4113 4114 // The source operand is not a shuffle. Initialize the root vector value for 4115 // this shuffle if that has not been done yet. 4116 if (!RootVec) 4117 RootVec = SourceOp; 4118 4119 // Give up as soon as a source operand does not match the existing root value. 4120 if (RootVec != SourceOp) 4121 return nullptr; 4122 4123 // The element must be coming from the same lane in the source vector 4124 // (although it may have crossed lanes in intermediate shuffles). 4125 if (RootElt != DestElt) 4126 return nullptr; 4127 4128 return RootVec; 4129 } 4130 4131 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, 4132 Type *RetTy, const SimplifyQuery &Q, 4133 unsigned MaxRecurse) { 4134 if (isa<UndefValue>(Mask)) 4135 return UndefValue::get(RetTy); 4136 4137 Type *InVecTy = Op0->getType(); 4138 unsigned MaskNumElts = Mask->getType()->getVectorNumElements(); 4139 unsigned InVecNumElts = InVecTy->getVectorNumElements(); 4140 4141 SmallVector<int, 32> Indices; 4142 ShuffleVectorInst::getShuffleMask(Mask, Indices); 4143 assert(MaskNumElts == Indices.size() && 4144 "Size of Indices not same as number of mask elements?"); 4145 4146 // Canonicalization: If mask does not select elements from an input vector, 4147 // replace that input vector with undef. 4148 bool MaskSelects0 = false, MaskSelects1 = false; 4149 for (unsigned i = 0; i != MaskNumElts; ++i) { 4150 if (Indices[i] == -1) 4151 continue; 4152 if ((unsigned)Indices[i] < InVecNumElts) 4153 MaskSelects0 = true; 4154 else 4155 MaskSelects1 = true; 4156 } 4157 if (!MaskSelects0) 4158 Op0 = UndefValue::get(InVecTy); 4159 if (!MaskSelects1) 4160 Op1 = UndefValue::get(InVecTy); 4161 4162 auto *Op0Const = dyn_cast<Constant>(Op0); 4163 auto *Op1Const = dyn_cast<Constant>(Op1); 4164 4165 // If all operands are constant, constant fold the shuffle. 4166 if (Op0Const && Op1Const) 4167 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask); 4168 4169 // Canonicalization: if only one input vector is constant, it shall be the 4170 // second one. 4171 if (Op0Const && !Op1Const) { 4172 std::swap(Op0, Op1); 4173 ShuffleVectorInst::commuteShuffleMask(Indices, InVecNumElts); 4174 } 4175 4176 // A shuffle of a splat is always the splat itself. Legal if the shuffle's 4177 // value type is same as the input vectors' type. 4178 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0)) 4179 if (isa<UndefValue>(Op1) && RetTy == InVecTy && 4180 OpShuf->getMask()->getSplatValue()) 4181 return Op0; 4182 4183 // Don't fold a shuffle with undef mask elements. This may get folded in a 4184 // better way using demanded bits or other analysis. 4185 // TODO: Should we allow this? 
4186 if (find(Indices, -1) != Indices.end())
4187 return nullptr;
4188
4189 // Check if every element of this shuffle can be mapped back to the
4190 // corresponding element of a single root vector. If so, we don't need this
4191 // shuffle. This handles simple identity shuffles as well as chains of
4192 // shuffles that may widen/narrow and/or move elements across lanes and back.
4193 Value *RootVec = nullptr;
4194 for (unsigned i = 0; i != MaskNumElts; ++i) {
4195 // Note that recursion is limited for each vector element, so if any element
4196 // exceeds the limit, this will fail to simplify.
4197 RootVec =
4198 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
4199
4200 // We can't replace a widening/narrowing shuffle with one of its operands.
4201 if (!RootVec || RootVec->getType() != RetTy)
4202 return nullptr;
4203 }
4204 return RootVec;
4205 }
4206
4207 /// Given operands for a ShuffleVectorInst, fold the result or return null.
4208 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
4209 Type *RetTy, const SimplifyQuery &Q) {
4210 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
4211 }
4212
4213 //=== Helper functions for higher up the class hierarchy.
4214
4215 /// Given operands for a BinaryOperator, see if we can fold the result.
4216 /// If not, this returns null.
4217 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
4218 const SimplifyQuery &Q, unsigned MaxRecurse) {
4219 switch (Opcode) {
4220 case Instruction::Add:
4221 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
4222 case Instruction::FAdd:
4223 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4224 case Instruction::Sub:
4225 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
4226 case Instruction::FSub:
4227 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4228 case Instruction::Mul:
4229 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
4230 case Instruction::FMul:
4231 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4232 case Instruction::SDiv:
4233 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
4234 case Instruction::UDiv:
4235 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
4236 case Instruction::FDiv:
4237 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4238 case Instruction::SRem:
4239 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
4240 case Instruction::URem:
4241 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
4242 case Instruction::FRem:
4243 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
4244 case Instruction::Shl:
4245 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
4246 case Instruction::LShr:
4247 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
4248 case Instruction::AShr:
4249 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
4250 case Instruction::And:
4251 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
4252 case Instruction::Or:
4253 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
4254 case Instruction::Xor:
4255 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
4256 default:
4257 llvm_unreachable("Unexpected opcode");
4258 }
4259 }
4260
4261 /// Given operands for a BinaryOperator, see if we can fold the result.
4262 /// If not, this returns null.
4263 /// In contrast to SimplifyBinOp, try to use FastMathFlags when folding the
4264 /// result. In case we don't need FastMathFlags, simply fall back to SimplifyBinOp.
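/// For example, given the nnan flag, "fsub nnan float %x, %x" can fold to
/// 0.0 (see SimplifyFSubInst), a fold that would be unsound without the
/// flag since %x might be NaN.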
4265 static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4266 const FastMathFlags &FMF, const SimplifyQuery &Q, 4267 unsigned MaxRecurse) { 4268 switch (Opcode) { 4269 case Instruction::FAdd: 4270 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse); 4271 case Instruction::FSub: 4272 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse); 4273 case Instruction::FMul: 4274 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse); 4275 case Instruction::FDiv: 4276 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse); 4277 default: 4278 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse); 4279 } 4280 } 4281 4282 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4283 const SimplifyQuery &Q) { 4284 return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit); 4285 } 4286 4287 Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4288 FastMathFlags FMF, const SimplifyQuery &Q) { 4289 return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit); 4290 } 4291 4292 /// Given operands for a CmpInst, see if we can fold the result. 4293 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4294 const SimplifyQuery &Q, unsigned MaxRecurse) { 4295 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate)) 4296 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse); 4297 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4298 } 4299 4300 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4301 const SimplifyQuery &Q) { 4302 return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit); 4303 } 4304 4305 static bool IsIdempotent(Intrinsic::ID ID) { 4306 switch (ID) { 4307 default: return false; 4308 4309 // Unary idempotent: f(f(x)) = f(x) 4310 case Intrinsic::fabs: 4311 case Intrinsic::floor: 4312 case Intrinsic::ceil: 4313 case Intrinsic::trunc: 4314 case Intrinsic::rint: 4315 case Intrinsic::nearbyint: 4316 case Intrinsic::round: 4317 return true; 4318 } 4319 } 4320 4321 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset, 4322 const DataLayout &DL) { 4323 GlobalValue *PtrSym; 4324 APInt PtrOffset; 4325 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL)) 4326 return nullptr; 4327 4328 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext()); 4329 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext()); 4330 Type *Int32PtrTy = Int32Ty->getPointerTo(); 4331 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext()); 4332 4333 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset); 4334 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64) 4335 return nullptr; 4336 4337 uint64_t OffsetInt = OffsetConstInt->getSExtValue(); 4338 if (OffsetInt % 4 != 0) 4339 return nullptr; 4340 4341 Constant *C = ConstantExpr::getGetElementPtr( 4342 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy), 4343 ConstantInt::get(Int64Ty, OffsetInt / 4)); 4344 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL); 4345 if (!Loaded) 4346 return nullptr; 4347 4348 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded); 4349 if (!LoadedCE) 4350 return nullptr; 4351 4352 if (LoadedCE->getOpcode() == Instruction::Trunc) { 4353 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 4354 if (!LoadedCE) 4355 return nullptr; 4356 } 4357 4358 if (LoadedCE->getOpcode() != Instruction::Sub) 4359 return nullptr; 4360 4361 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 4362 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt) 4363 return nullptr; 
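// At this point the loaded value is known to be "sub (ptrtoint <pointer>),
// <rhs>". Grab the pointer, then verify below that <rhs> names the same
// global and offset that the relative load was based on.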
4364 auto *LoadedLHSPtr = LoadedLHS->getOperand(0); 4365 4366 Constant *LoadedRHS = LoadedCE->getOperand(1); 4367 GlobalValue *LoadedRHSSym; 4368 APInt LoadedRHSOffset; 4369 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset, 4370 DL) || 4371 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset) 4372 return nullptr; 4373 4374 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy); 4375 } 4376 4377 static bool maskIsAllZeroOrUndef(Value *Mask) { 4378 auto *ConstMask = dyn_cast<Constant>(Mask); 4379 if (!ConstMask) 4380 return false; 4381 if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask)) 4382 return true; 4383 for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E; 4384 ++I) { 4385 if (auto *MaskElt = ConstMask->getAggregateElement(I)) 4386 if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt)) 4387 continue; 4388 return false; 4389 } 4390 return true; 4391 } 4392 4393 template <typename IterTy> 4394 static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd, 4395 const SimplifyQuery &Q, unsigned MaxRecurse) { 4396 Intrinsic::ID IID = F->getIntrinsicID(); 4397 unsigned NumOperands = std::distance(ArgBegin, ArgEnd); 4398 4399 // Unary Ops 4400 if (NumOperands == 1) { 4401 // Perform idempotent optimizations 4402 if (IsIdempotent(IID)) { 4403 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin)) { 4404 if (II->getIntrinsicID() == IID) 4405 return II; 4406 } 4407 } 4408 4409 switch (IID) { 4410 case Intrinsic::fabs: { 4411 if (SignBitMustBeZero(*ArgBegin, Q.TLI)) 4412 return *ArgBegin; 4413 return nullptr; 4414 } 4415 default: 4416 return nullptr; 4417 } 4418 } 4419 4420 // Binary Ops 4421 if (NumOperands == 2) { 4422 Value *LHS = *ArgBegin; 4423 Value *RHS = *(ArgBegin + 1); 4424 Type *ReturnType = F->getReturnType(); 4425 4426 switch (IID) { 4427 case Intrinsic::usub_with_overflow: 4428 case Intrinsic::ssub_with_overflow: { 4429 // X - X -> { 0, false } 4430 if (LHS == RHS) 4431 return Constant::getNullValue(ReturnType); 4432 4433 // X - undef -> undef 4434 // undef - X -> undef 4435 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) 4436 return UndefValue::get(ReturnType); 4437 4438 return nullptr; 4439 } 4440 case Intrinsic::uadd_with_overflow: 4441 case Intrinsic::sadd_with_overflow: { 4442 // X + undef -> undef 4443 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) 4444 return UndefValue::get(ReturnType); 4445 4446 return nullptr; 4447 } 4448 case Intrinsic::umul_with_overflow: 4449 case Intrinsic::smul_with_overflow: { 4450 // 0 * X -> { 0, false } 4451 // X * 0 -> { 0, false } 4452 if (match(LHS, m_Zero()) || match(RHS, m_Zero())) 4453 return Constant::getNullValue(ReturnType); 4454 4455 // undef * X -> { 0, false } 4456 // X * undef -> { 0, false } 4457 if (match(LHS, m_Undef()) || match(RHS, m_Undef())) 4458 return Constant::getNullValue(ReturnType); 4459 4460 return nullptr; 4461 } 4462 case Intrinsic::load_relative: { 4463 Constant *C0 = dyn_cast<Constant>(LHS); 4464 Constant *C1 = dyn_cast<Constant>(RHS); 4465 if (C0 && C1) 4466 return SimplifyRelativeLoad(C0, C1, Q.DL); 4467 return nullptr; 4468 } 4469 default: 4470 return nullptr; 4471 } 4472 } 4473 4474 // Simplify calls to llvm.masked.load.* 4475 switch (IID) { 4476 case Intrinsic::masked_load: { 4477 Value *MaskArg = ArgBegin[2]; 4478 Value *PassthruArg = ArgBegin[3]; 4479 // If the mask is all zeros or undef, the "passthru" argument is the result. 
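// For example (illustrative): a call to llvm.masked.load whose mask operand
// is zeroinitializer loads nothing, so it is equivalent to its passthru
// operand.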
4480 if (maskIsAllZeroOrUndef(MaskArg)) 4481 return PassthruArg; 4482 return nullptr; 4483 } 4484 default: 4485 return nullptr; 4486 } 4487 } 4488 4489 template <typename IterTy> 4490 static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd, 4491 const SimplifyQuery &Q, unsigned MaxRecurse) { 4492 Type *Ty = V->getType(); 4493 if (PointerType *PTy = dyn_cast<PointerType>(Ty)) 4494 Ty = PTy->getElementType(); 4495 FunctionType *FTy = cast<FunctionType>(Ty); 4496 4497 // call undef -> undef 4498 // call null -> undef 4499 if (isa<UndefValue>(V) || isa<ConstantPointerNull>(V)) 4500 return UndefValue::get(FTy->getReturnType()); 4501 4502 Function *F = dyn_cast<Function>(V); 4503 if (!F) 4504 return nullptr; 4505 4506 if (F->isIntrinsic()) 4507 if (Value *Ret = SimplifyIntrinsic(F, ArgBegin, ArgEnd, Q, MaxRecurse)) 4508 return Ret; 4509 4510 if (!canConstantFoldCallTo(F)) 4511 return nullptr; 4512 4513 SmallVector<Constant *, 4> ConstantArgs; 4514 ConstantArgs.reserve(ArgEnd - ArgBegin); 4515 for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) { 4516 Constant *C = dyn_cast<Constant>(*I); 4517 if (!C) 4518 return nullptr; 4519 ConstantArgs.push_back(C); 4520 } 4521 4522 return ConstantFoldCall(F, ConstantArgs, Q.TLI); 4523 } 4524 4525 Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin, 4526 User::op_iterator ArgEnd, const SimplifyQuery &Q) { 4527 return ::SimplifyCall(V, ArgBegin, ArgEnd, Q, RecursionLimit); 4528 } 4529 4530 Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args, 4531 const SimplifyQuery &Q) { 4532 return ::SimplifyCall(V, Args.begin(), Args.end(), Q, RecursionLimit); 4533 } 4534 4535 /// See if we can compute a simplified version of this instruction. 4536 /// If not, this returns null. 4537 4538 Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ, 4539 OptimizationRemarkEmitter *ORE) { 4540 const SimplifyQuery Q = SQ.CxtI ? 
SQ : SQ.getWithInstruction(I); 4541 Value *Result; 4542 4543 switch (I->getOpcode()) { 4544 default: 4545 Result = ConstantFoldInstruction(I, Q.DL, Q.TLI); 4546 break; 4547 case Instruction::FAdd: 4548 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1), 4549 I->getFastMathFlags(), Q); 4550 break; 4551 case Instruction::Add: 4552 Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1), 4553 cast<BinaryOperator>(I)->hasNoSignedWrap(), 4554 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q); 4555 break; 4556 case Instruction::FSub: 4557 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1), 4558 I->getFastMathFlags(), Q); 4559 break; 4560 case Instruction::Sub: 4561 Result = SimplifySubInst(I->getOperand(0), I->getOperand(1), 4562 cast<BinaryOperator>(I)->hasNoSignedWrap(), 4563 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q); 4564 break; 4565 case Instruction::FMul: 4566 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1), 4567 I->getFastMathFlags(), Q); 4568 break; 4569 case Instruction::Mul: 4570 Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q); 4571 break; 4572 case Instruction::SDiv: 4573 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q); 4574 break; 4575 case Instruction::UDiv: 4576 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q); 4577 break; 4578 case Instruction::FDiv: 4579 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), 4580 I->getFastMathFlags(), Q); 4581 break; 4582 case Instruction::SRem: 4583 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q); 4584 break; 4585 case Instruction::URem: 4586 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q); 4587 break; 4588 case Instruction::FRem: 4589 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), 4590 I->getFastMathFlags(), Q); 4591 break; 4592 case Instruction::Shl: 4593 Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1), 4594 cast<BinaryOperator>(I)->hasNoSignedWrap(), 4595 cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q); 4596 break; 4597 case Instruction::LShr: 4598 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1), 4599 cast<BinaryOperator>(I)->isExact(), Q); 4600 break; 4601 case Instruction::AShr: 4602 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1), 4603 cast<BinaryOperator>(I)->isExact(), Q); 4604 break; 4605 case Instruction::And: 4606 Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q); 4607 break; 4608 case Instruction::Or: 4609 Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q); 4610 break; 4611 case Instruction::Xor: 4612 Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q); 4613 break; 4614 case Instruction::ICmp: 4615 Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), 4616 I->getOperand(0), I->getOperand(1), Q); 4617 break; 4618 case Instruction::FCmp: 4619 Result = 4620 SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0), 4621 I->getOperand(1), I->getFastMathFlags(), Q); 4622 break; 4623 case Instruction::Select: 4624 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1), 4625 I->getOperand(2), Q); 4626 break; 4627 case Instruction::GetElementPtr: { 4628 SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end()); 4629 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(), 4630 Ops, Q); 4631 break; 4632 } 4633 case Instruction::InsertValue: { 4634 InsertValueInst *IV = cast<InsertValueInst>(I); 4635 Result = SimplifyInsertValueInst(IV->getAggregateOperand(), 4636 
IV->getInsertedValueOperand(),
4637 IV->getIndices(), Q);
4638 break;
4639 }
4640 case Instruction::ExtractValue: {
4641 auto *EVI = cast<ExtractValueInst>(I);
4642 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
4643 EVI->getIndices(), Q);
4644 break;
4645 }
4646 case Instruction::ExtractElement: {
4647 auto *EEI = cast<ExtractElementInst>(I);
4648 Result = SimplifyExtractElementInst(EEI->getVectorOperand(),
4649 EEI->getIndexOperand(), Q);
4650 break;
4651 }
4652 case Instruction::ShuffleVector: {
4653 auto *SVI = cast<ShuffleVectorInst>(I);
4654 Result = SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
4655 SVI->getMask(), SVI->getType(), Q);
4656 break;
4657 }
4658 case Instruction::PHI:
4659 Result = SimplifyPHINode(cast<PHINode>(I), Q);
4660 break;
4661 case Instruction::Call: {
4662 CallSite CS(cast<CallInst>(I));
4663 Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(), Q);
4664 break;
4665 }
4666 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
4667 #include "llvm/IR/Instruction.def"
4668 #undef HANDLE_CAST_INST
4669 Result =
4670 SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q);
4671 break;
4672 case Instruction::Alloca:
4673 // No simplifications for Alloca and it can't be constant folded.
4674 Result = nullptr;
4675 break;
4676 }
4677
4678 // In general, it is possible for computeKnownBits to determine all bits in a
4679 // value even when the operands are not all constants.
4680 if (!Result && I->getType()->isIntOrIntVectorTy()) {
4681 KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
4682 if (Known.isConstant())
4683 Result = ConstantInt::get(I->getType(), Known.getConstant());
4684 }
4685
4686 // If called on unreachable code, the above logic may report that the
4687 // instruction simplified to itself. Make life easier for users by
4688 // detecting that case here, returning a safe value instead.
4689 return Result == I ? UndefValue::get(I->getType()) : Result;
4690 }
4691
4692 /// \brief Implementation of recursive simplification through an instruction's
4693 /// uses.
4694 ///
4695 /// This is the common implementation of the recursive simplification routines.
4696 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
4697 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
4698 /// instructions to process and attempt to simplify it using
4699 /// InstructionSimplify.
4700 ///
4701 /// This routine returns 'true' only when *it* simplifies something. The passed
4702 /// in simplified value does not count toward this.
4703 static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
4704 const TargetLibraryInfo *TLI,
4705 const DominatorTree *DT,
4706 AssumptionCache *AC) {
4707 bool Simplified = false;
4708 SmallSetVector<Instruction *, 8> Worklist;
4709 const DataLayout &DL = I->getModule()->getDataLayout();
4710
4711 // If we have an explicit value to collapse to, do that round of the
4712 // simplification loop by hand initially.
4713 if (SimpleV) {
4714 for (User *U : I->users())
4715 if (U != I)
4716 Worklist.insert(cast<Instruction>(U));
4717
4718 // Replace the instruction with its simplified value.
4719 I->replaceAllUsesWith(SimpleV);
4720
4721 // Gracefully handle edge cases where the instruction is not wired into any
4722 // parent block.
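// EH pads, terminators and instructions with side effects are deliberately
// left in place even though their uses were just rewritten; only harmless,
// block-attached instructions are erased here.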
4723 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
4724 !I->mayHaveSideEffects())
4725 I->eraseFromParent();
4726 } else {
4727 Worklist.insert(I);
4728 }
4729
4730 // Note that we must test the size on each iteration; the worklist can grow.
4731 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
4732 I = Worklist[Idx];
4733
4734 // See if this instruction simplifies.
4735 SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
4736 if (!SimpleV)
4737 continue;
4738
4739 Simplified = true;
4740
4741 // Stash away all the uses of the old instruction so we can check them for
4742 // recursive simplifications after a RAUW. This is cheaper than checking all
4743 // uses of SimpleV on the recursive step in most cases.
4744 for (User *U : I->users())
4745 Worklist.insert(cast<Instruction>(U));
4746
4747 // Replace the instruction with its simplified value.
4748 I->replaceAllUsesWith(SimpleV);
4749
4750 // Gracefully handle edge cases where the instruction is not wired into any
4751 // parent block.
4752 if (I->getParent() && !I->isEHPad() && !isa<TerminatorInst>(I) &&
4753 !I->mayHaveSideEffects())
4754 I->eraseFromParent();
4755 }
4756 return Simplified;
4757 }
4758
4759 bool llvm::recursivelySimplifyInstruction(Instruction *I,
4760 const TargetLibraryInfo *TLI,
4761 const DominatorTree *DT,
4762 AssumptionCache *AC) {
4763 return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
4764 }
4765
4766 bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
4767 const TargetLibraryInfo *TLI,
4768 const DominatorTree *DT,
4769 AssumptionCache *AC) {
4770 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
4771 assert(SimpleV && "Must provide a simplified value.");
4772 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
4773 }
4774
4775 namespace llvm {
4776 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
4777 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
4778 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
4779 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
4780 auto *TLI = TLIWP ? &TLIWP->getTLI() : nullptr;
4781 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
4782 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
4783 return {F.getParent()->getDataLayout(), TLI, DT, AC};
4784 }
4785
4786 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
4787 const DataLayout &DL) {
4788 return {DL, &AR.TLI, &AR.DT, &AR.AC};
4789 }
4790
4791 template <class T, class... TArgs>
4792 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
4793 Function &F) {
4794 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
4795 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
4796 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
4797 return {F.getParent()->getDataLayout(), TLI, DT, AC};
4798 }
4799 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
4800 Function &);
4801 }
4802