//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: This is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
                               const SimplifyQuery &, unsigned);
static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
                              unsigned);

static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
                                     Value *FalseVal) {
  BinaryOperator::BinaryOps BinOpCode;
  if (auto *BO = dyn_cast<BinaryOperator>(Cond))
    BinOpCode = BO->getOpcode();
  else
    return nullptr;

  CmpInst::Predicate ExpectedPred, Pred1, Pred2;
  if (BinOpCode == BinaryOperator::Or) {
    ExpectedPred = ICmpInst::ICMP_NE;
  } else if (BinOpCode == BinaryOperator::And) {
    ExpectedPred = ICmpInst::ICMP_EQ;
  } else
    return nullptr;

  // %A = icmp eq %TV, %FV
  // %B = icmp eq %X, %Y (and one of these is a select operand)
  // %C = and %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %FV

  // %A = icmp ne %TV, %FV
  // %B = icmp ne %X, %Y (and one of these is a select operand)
  // %C = or %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %TV
  Value *X, *Y;
  if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
                                      m_Specific(FalseVal)),
                             m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
      Pred1 != Pred2 || Pred1 != ExpectedPred)
    return nullptr;

  if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
    return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;

  return nullptr;
}

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) {
  return ConstantInt::getFalse(Ty);
}

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) {
  return ConstantInt::getTrue(Ty);
}

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getFunction())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent() == &I->getFunction()->getEntryBlock() &&
      !isa<InvokeInst>(I))
    return true;

  return false;
}

/// Simplify "A op (B op' C)" by distributing op over op', turning it into
/// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
/// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
/// Returns the simplified value, or null if no simplification was performed.
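/// For example, with Opcode = Mul and OpcodeToExpand = Add, "(A + B) * C" is
/// tried as "(A * C) + (B * C)"; the expansion is only kept if both partial
/// products and the recombining add all simplify, so no new instructions are
/// created.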
171 static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, 172 Instruction::BinaryOps OpcodeToExpand, 173 const SimplifyQuery &Q, unsigned MaxRecurse) { 174 // Recursion is always used, so bail out at once if we already hit the limit. 175 if (!MaxRecurse--) 176 return nullptr; 177 178 // Check whether the expression has the form "(A op' B) op C". 179 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS)) 180 if (Op0->getOpcode() == OpcodeToExpand) { 181 // It does! Try turning it into "(A op C) op' (B op C)". 182 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS; 183 // Do "A op C" and "B op C" both simplify? 184 if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) 185 if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) { 186 // They do! Return "L op' R" if it simplifies or is already available. 187 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS. 188 if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand) 189 && L == B && R == A)) { 190 ++NumExpand; 191 return LHS; 192 } 193 // Otherwise return "L op' R" if it simplifies. 194 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) { 195 ++NumExpand; 196 return V; 197 } 198 } 199 } 200 201 // Check whether the expression has the form "A op (B op' C)". 202 if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS)) 203 if (Op1->getOpcode() == OpcodeToExpand) { 204 // It does! Try turning it into "(A op B) op' (A op C)". 205 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1); 206 // Do "A op B" and "A op C" both simplify? 207 if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) 208 if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) { 209 // They do! Return "L op' R" if it simplifies or is already available. 210 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS. 211 if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand) 212 && L == C && R == B)) { 213 ++NumExpand; 214 return RHS; 215 } 216 // Otherwise return "L op' R" if it simplifies. 217 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) { 218 ++NumExpand; 219 return V; 220 } 221 } 222 } 223 224 return nullptr; 225 } 226 227 /// Generic simplifications for associative binary operations. 228 /// Returns the simpler value, or null if none was found. 229 static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode, 230 Value *LHS, Value *RHS, 231 const SimplifyQuery &Q, 232 unsigned MaxRecurse) { 233 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!"); 234 235 // Recursion is always used, so bail out at once if we already hit the limit. 236 if (!MaxRecurse--) 237 return nullptr; 238 239 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); 240 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS); 241 242 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely. 243 if (Op0 && Op0->getOpcode() == Opcode) { 244 Value *A = Op0->getOperand(0); 245 Value *B = Op0->getOperand(1); 246 Value *C = RHS; 247 248 // Does "B op C" simplify? 249 if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) { 250 // It does! Return "A op V" if it simplifies or is already available. 251 // If V equals B then "A op V" is just the LHS. 252 if (V == B) return LHS; 253 // Otherwise return "A op V" if it simplifies. 
254 if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) { 255 ++NumReassoc; 256 return W; 257 } 258 } 259 } 260 261 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely. 262 if (Op1 && Op1->getOpcode() == Opcode) { 263 Value *A = LHS; 264 Value *B = Op1->getOperand(0); 265 Value *C = Op1->getOperand(1); 266 267 // Does "A op B" simplify? 268 if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) { 269 // It does! Return "V op C" if it simplifies or is already available. 270 // If V equals B then "V op C" is just the RHS. 271 if (V == B) return RHS; 272 // Otherwise return "V op C" if it simplifies. 273 if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) { 274 ++NumReassoc; 275 return W; 276 } 277 } 278 } 279 280 // The remaining transforms require commutativity as well as associativity. 281 if (!Instruction::isCommutative(Opcode)) 282 return nullptr; 283 284 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely. 285 if (Op0 && Op0->getOpcode() == Opcode) { 286 Value *A = Op0->getOperand(0); 287 Value *B = Op0->getOperand(1); 288 Value *C = RHS; 289 290 // Does "C op A" simplify? 291 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { 292 // It does! Return "V op B" if it simplifies or is already available. 293 // If V equals A then "V op B" is just the LHS. 294 if (V == A) return LHS; 295 // Otherwise return "V op B" if it simplifies. 296 if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) { 297 ++NumReassoc; 298 return W; 299 } 300 } 301 } 302 303 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely. 304 if (Op1 && Op1->getOpcode() == Opcode) { 305 Value *A = LHS; 306 Value *B = Op1->getOperand(0); 307 Value *C = Op1->getOperand(1); 308 309 // Does "C op A" simplify? 310 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { 311 // It does! Return "B op V" if it simplifies or is already available. 312 // If V equals C then "B op V" is just the RHS. 313 if (V == C) return RHS; 314 // Otherwise return "B op V" if it simplifies. 315 if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) { 316 ++NumReassoc; 317 return W; 318 } 319 } 320 } 321 322 return nullptr; 323 } 324 325 /// In the case of a binary operation with a select instruction as an operand, 326 /// try to simplify the binop by seeing whether evaluating it on both branches 327 /// of the select results in the same value. Returns the common value if so, 328 /// otherwise returns null. 329 static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, 330 Value *RHS, const SimplifyQuery &Q, 331 unsigned MaxRecurse) { 332 // Recursion is always used, so bail out at once if we already hit the limit. 333 if (!MaxRecurse--) 334 return nullptr; 335 336 SelectInst *SI; 337 if (isa<SelectInst>(LHS)) { 338 SI = cast<SelectInst>(LHS); 339 } else { 340 assert(isa<SelectInst>(RHS) && "No select instruction operand!"); 341 SI = cast<SelectInst>(RHS); 342 } 343 344 // Evaluate the BinOp on the true and false branches of the select. 345 Value *TV; 346 Value *FV; 347 if (SI == LHS) { 348 TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse); 349 FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse); 350 } else { 351 TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse); 352 FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse); 353 } 354 355 // If they simplified to the same value, then return the common value. 
356 // If they both failed to simplify then return null. 357 if (TV == FV) 358 return TV; 359 360 // If one branch simplified to undef, return the other one. 361 if (TV && isa<UndefValue>(TV)) 362 return FV; 363 if (FV && isa<UndefValue>(FV)) 364 return TV; 365 366 // If applying the operation did not change the true and false select values, 367 // then the result of the binop is the select itself. 368 if (TV == SI->getTrueValue() && FV == SI->getFalseValue()) 369 return SI; 370 371 // If one branch simplified and the other did not, and the simplified 372 // value is equal to the unsimplified one, return the simplified value. 373 // For example, select (cond, X, X & Z) & Z -> X & Z. 374 if ((FV && !TV) || (TV && !FV)) { 375 // Check that the simplified value has the form "X op Y" where "op" is the 376 // same as the original operation. 377 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV); 378 if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) { 379 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS". 380 // We already know that "op" is the same as for the simplified value. See 381 // if the operands match too. If so, return the simplified value. 382 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue(); 383 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS; 384 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch; 385 if (Simplified->getOperand(0) == UnsimplifiedLHS && 386 Simplified->getOperand(1) == UnsimplifiedRHS) 387 return Simplified; 388 if (Simplified->isCommutative() && 389 Simplified->getOperand(1) == UnsimplifiedLHS && 390 Simplified->getOperand(0) == UnsimplifiedRHS) 391 return Simplified; 392 } 393 } 394 395 return nullptr; 396 } 397 398 /// In the case of a comparison with a select instruction, try to simplify the 399 /// comparison by seeing whether both branches of the select result in the same 400 /// value. Returns the common value if so, otherwise returns null. 401 static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS, 402 Value *RHS, const SimplifyQuery &Q, 403 unsigned MaxRecurse) { 404 // Recursion is always used, so bail out at once if we already hit the limit. 405 if (!MaxRecurse--) 406 return nullptr; 407 408 // Make sure the select is on the LHS. 409 if (!isa<SelectInst>(LHS)) { 410 std::swap(LHS, RHS); 411 Pred = CmpInst::getSwappedPredicate(Pred); 412 } 413 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!"); 414 SelectInst *SI = cast<SelectInst>(LHS); 415 Value *Cond = SI->getCondition(); 416 Value *TV = SI->getTrueValue(); 417 Value *FV = SI->getFalseValue(); 418 419 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it. 420 // Does "cmp TV, RHS" simplify? 421 Value *TCmp = SimplifyCmpInst(Pred, TV, RHS, Q, MaxRecurse); 422 if (TCmp == Cond) { 423 // It not only simplified, it simplified to the select condition. Replace 424 // it with 'true'. 425 TCmp = getTrue(Cond->getType()); 426 } else if (!TCmp) { 427 // It didn't simplify. However if "cmp TV, RHS" is equal to the select 428 // condition then we can replace it with 'true'. Otherwise give up. 429 if (!isSameCompare(Cond, Pred, TV, RHS)) 430 return nullptr; 431 TCmp = getTrue(Cond->getType()); 432 } 433 434 // Does "cmp FV, RHS" simplify? 435 Value *FCmp = SimplifyCmpInst(Pred, FV, RHS, Q, MaxRecurse); 436 if (FCmp == Cond) { 437 // It not only simplified, it simplified to the select condition. Replace 438 // it with 'false'. 
439 FCmp = getFalse(Cond->getType()); 440 } else if (!FCmp) { 441 // It didn't simplify. However if "cmp FV, RHS" is equal to the select 442 // condition then we can replace it with 'false'. Otherwise give up. 443 if (!isSameCompare(Cond, Pred, FV, RHS)) 444 return nullptr; 445 FCmp = getFalse(Cond->getType()); 446 } 447 448 // If both sides simplified to the same value, then use it as the result of 449 // the original comparison. 450 if (TCmp == FCmp) 451 return TCmp; 452 453 // The remaining cases only make sense if the select condition has the same 454 // type as the result of the comparison, so bail out if this is not so. 455 if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy()) 456 return nullptr; 457 // If the false value simplified to false, then the result of the compare 458 // is equal to "Cond && TCmp". This also catches the case when the false 459 // value simplified to false and the true value to true, returning "Cond". 460 if (match(FCmp, m_Zero())) 461 if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse)) 462 return V; 463 // If the true value simplified to true, then the result of the compare 464 // is equal to "Cond || FCmp". 465 if (match(TCmp, m_One())) 466 if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse)) 467 return V; 468 // Finally, if the false value simplified to true and the true value to 469 // false, then the result of the compare is equal to "!Cond". 470 if (match(FCmp, m_One()) && match(TCmp, m_Zero())) 471 if (Value *V = 472 SimplifyXorInst(Cond, Constant::getAllOnesValue(Cond->getType()), 473 Q, MaxRecurse)) 474 return V; 475 476 return nullptr; 477 } 478 479 /// In the case of a binary operation with an operand that is a PHI instruction, 480 /// try to simplify the binop by seeing whether evaluating it on the incoming 481 /// phi values yields the same result for every value. If so returns the common 482 /// value, otherwise returns null. 483 static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, 484 Value *RHS, const SimplifyQuery &Q, 485 unsigned MaxRecurse) { 486 // Recursion is always used, so bail out at once if we already hit the limit. 487 if (!MaxRecurse--) 488 return nullptr; 489 490 PHINode *PI; 491 if (isa<PHINode>(LHS)) { 492 PI = cast<PHINode>(LHS); 493 // Bail out if RHS and the phi may be mutually interdependent due to a loop. 494 if (!valueDominatesPHI(RHS, PI, Q.DT)) 495 return nullptr; 496 } else { 497 assert(isa<PHINode>(RHS) && "No PHI instruction operand!"); 498 PI = cast<PHINode>(RHS); 499 // Bail out if LHS and the phi may be mutually interdependent due to a loop. 500 if (!valueDominatesPHI(LHS, PI, Q.DT)) 501 return nullptr; 502 } 503 504 // Evaluate the BinOp on the incoming phi values. 505 Value *CommonValue = nullptr; 506 for (Value *Incoming : PI->incoming_values()) { 507 // If the incoming value is the phi node itself, it can safely be skipped. 508 if (Incoming == PI) continue; 509 Value *V = PI == LHS ? 510 SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) : 511 SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse); 512 // If the operation failed to simplify, or simplified to a different value 513 // to previously, then give up. 514 if (!V || (CommonValue && V != CommonValue)) 515 return nullptr; 516 CommonValue = V; 517 } 518 519 return CommonValue; 520 } 521 522 /// In the case of a comparison with a PHI instruction, try to simplify the 523 /// comparison by seeing whether comparing with all of the incoming phi values 524 /// yields the same result every time. 
/// If so returns the common result, otherwise returns null.
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the comparison on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than it did previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the two operands are negated, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
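  // (For example, "(X + Y) + (0 - Y)" reassociates to "X + (Y + (0 - Y))",
  // whose inner add folds to 0 and whose outer add then folds to X.)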
624 if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, 625 MaxRecurse)) 626 return V; 627 628 // Threading Add over selects and phi nodes is pointless, so don't bother. 629 // Threading over the select in "A + select(cond, B, C)" means evaluating 630 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and 631 // only if B and C are equal. If B and C are equal then (since we assume 632 // that operands have already been simplified) "select(cond, B, C)" should 633 // have been simplified to the common value of B and C already. Analysing 634 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly 635 // for threading over phi nodes. 636 637 return nullptr; 638 } 639 640 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, 641 const SimplifyQuery &Query) { 642 return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit); 643 } 644 645 /// Compute the base pointer and cumulative constant offsets for V. 646 /// 647 /// This strips all constant offsets off of V, leaving it the base pointer, and 648 /// accumulates the total constant offset applied in the returned constant. It 649 /// returns 0 if V is not a pointer, and returns the constant '0' if there are 650 /// no constant offsets applied. 651 /// 652 /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't 653 /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc. 654 /// folding. 655 static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, 656 bool AllowNonInbounds = false) { 657 assert(V->getType()->isPtrOrPtrVectorTy()); 658 659 Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType(); 660 APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth()); 661 662 V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds); 663 // As that strip may trace through `addrspacecast`, need to sext or trunc 664 // the offset calculated. 665 IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType(); 666 Offset = Offset.sextOrTrunc(IntPtrTy->getIntegerBitWidth()); 667 668 Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset); 669 if (V->getType()->isVectorTy()) 670 return ConstantVector::getSplat(V->getType()->getVectorNumElements(), 671 OffsetIntPtr); 672 return OffsetIntPtr; 673 } 674 675 /// Compute the constant difference between two pointer values. 676 /// If the difference is not a constant, returns zero. 677 static Constant *computePointerDifference(const DataLayout &DL, Value *LHS, 678 Value *RHS) { 679 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS); 680 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS); 681 682 // If LHS and RHS are not related via constant offsets to the same base 683 // value, there is nothing we can do here. 684 if (LHS != RHS) 685 return nullptr; 686 687 // Otherwise, the difference of LHS - RHS can be computed as: 688 // LHS - RHS 689 // = (LHSOffset + Base) - (RHSOffset + Base) 690 // = LHSOffset - RHSOffset 691 return ConstantExpr::getSub(LHSOffset, RHSOffset); 692 } 693 694 /// Given operands for a Sub, see if we can fold the result. 695 /// If not, this returns null. 
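/// For example, "sub i32 %x, 0" folds to %x and "sub i32 %x, %x" folds to 0.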
696 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 697 const SimplifyQuery &Q, unsigned MaxRecurse) { 698 if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q)) 699 return C; 700 701 // X - undef -> undef 702 // undef - X -> undef 703 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 704 return UndefValue::get(Op0->getType()); 705 706 // X - 0 -> X 707 if (match(Op1, m_Zero())) 708 return Op0; 709 710 // X - X -> 0 711 if (Op0 == Op1) 712 return Constant::getNullValue(Op0->getType()); 713 714 // Is this a negation? 715 if (match(Op0, m_Zero())) { 716 // 0 - X -> 0 if the sub is NUW. 717 if (isNUW) 718 return Constant::getNullValue(Op0->getType()); 719 720 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 721 if (Known.Zero.isMaxSignedValue()) { 722 // Op1 is either 0 or the minimum signed value. If the sub is NSW, then 723 // Op1 must be 0 because negating the minimum signed value is undefined. 724 if (isNSW) 725 return Constant::getNullValue(Op0->getType()); 726 727 // 0 - X -> X if X is 0 or the minimum signed value. 728 return Op1; 729 } 730 } 731 732 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies. 733 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X 734 Value *X = nullptr, *Y = nullptr, *Z = Op1; 735 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z 736 // See if "V === Y - Z" simplifies. 737 if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1)) 738 // It does! Now see if "X + V" simplifies. 739 if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) { 740 // It does, we successfully reassociated! 741 ++NumReassoc; 742 return W; 743 } 744 // See if "V === X - Z" simplifies. 745 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1)) 746 // It does! Now see if "Y + V" simplifies. 747 if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) { 748 // It does, we successfully reassociated! 749 ++NumReassoc; 750 return W; 751 } 752 } 753 754 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies. 755 // For example, X - (X + 1) -> -1 756 X = Op0; 757 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z) 758 // See if "V === X - Y" simplifies. 759 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1)) 760 // It does! Now see if "V - Z" simplifies. 761 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) { 762 // It does, we successfully reassociated! 763 ++NumReassoc; 764 return W; 765 } 766 // See if "V === X - Z" simplifies. 767 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1)) 768 // It does! Now see if "V - Y" simplifies. 769 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) { 770 // It does, we successfully reassociated! 771 ++NumReassoc; 772 return W; 773 } 774 } 775 776 // Z - (X - Y) -> (Z - X) + Y if everything simplifies. 777 // For example, X - (X - Y) -> Y. 778 Z = Op0; 779 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y) 780 // See if "V === Z - X" simplifies. 781 if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1)) 782 // It does! Now see if "V + Y" simplifies. 783 if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) { 784 // It does, we successfully reassociated! 785 ++NumReassoc; 786 return W; 787 } 788 789 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies. 
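  // For example, if Op0 and Op1 are two separate "trunc i64 %a to i32"
  // instructions, then X == Y == %a, "X - Y" folds to 0 in the wide type, and
  // truncating that constant folds to 0 as well.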
790 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) && 791 match(Op1, m_Trunc(m_Value(Y)))) 792 if (X->getType() == Y->getType()) 793 // See if "V === X - Y" simplifies. 794 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1)) 795 // It does! Now see if "trunc V" simplifies. 796 if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(), 797 Q, MaxRecurse - 1)) 798 // It does, return the simplified "trunc V". 799 return W; 800 801 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...). 802 if (match(Op0, m_PtrToInt(m_Value(X))) && 803 match(Op1, m_PtrToInt(m_Value(Y)))) 804 if (Constant *Result = computePointerDifference(Q.DL, X, Y)) 805 return ConstantExpr::getIntegerCast(Result, Op0->getType(), true); 806 807 // i1 sub -> xor. 808 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1)) 809 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1)) 810 return V; 811 812 // Threading Sub over selects and phi nodes is pointless, so don't bother. 813 // Threading over the select in "A - select(cond, B, C)" means evaluating 814 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and 815 // only if B and C are equal. If B and C are equal then (since we assume 816 // that operands have already been simplified) "select(cond, B, C)" should 817 // have been simplified to the common value of B and C already. Analysing 818 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly 819 // for threading over phi nodes. 820 821 return nullptr; 822 } 823 824 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 825 const SimplifyQuery &Q) { 826 return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit); 827 } 828 829 /// Given operands for a Mul, see if we can fold the result. 830 /// If not, this returns null. 831 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 832 unsigned MaxRecurse) { 833 if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q)) 834 return C; 835 836 // X * undef -> 0 837 // X * 0 -> 0 838 if (match(Op1, m_CombineOr(m_Undef(), m_Zero()))) 839 return Constant::getNullValue(Op0->getType()); 840 841 // X * 1 -> X 842 if (match(Op1, m_One())) 843 return Op0; 844 845 // (X / Y) * Y -> X if the division is exact. 846 Value *X = nullptr; 847 if (Q.IIQ.UseInstrInfo && 848 (match(Op0, 849 m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y 850 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y) 851 return X; 852 853 // i1 mul -> and. 854 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1)) 855 if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1)) 856 return V; 857 858 // Try some generic simplifications for associative operations. 859 if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, 860 MaxRecurse)) 861 return V; 862 863 // Mul distributes over Add. Try some generic simplifications based on this. 864 if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add, 865 Q, MaxRecurse)) 866 return V; 867 868 // If the operation is with the result of a select instruction, check whether 869 // operating on either branch of the select always yields the same value. 
870 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 871 if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, 872 MaxRecurse)) 873 return V; 874 875 // If the operation is with the result of a phi instruction, check whether 876 // operating on all incoming values of the phi always yields the same value. 877 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 878 if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, 879 MaxRecurse)) 880 return V; 881 882 return nullptr; 883 } 884 885 Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 886 return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit); 887 } 888 889 /// Check for common or similar folds of integer division or integer remainder. 890 /// This applies to all 4 opcodes (sdiv/udiv/srem/urem). 891 static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) { 892 Type *Ty = Op0->getType(); 893 894 // X / undef -> undef 895 // X % undef -> undef 896 if (match(Op1, m_Undef())) 897 return Op1; 898 899 // X / 0 -> undef 900 // X % 0 -> undef 901 // We don't need to preserve faults! 902 if (match(Op1, m_Zero())) 903 return UndefValue::get(Ty); 904 905 // If any element of a constant divisor vector is zero or undef, the whole op 906 // is undef. 907 auto *Op1C = dyn_cast<Constant>(Op1); 908 if (Op1C && Ty->isVectorTy()) { 909 unsigned NumElts = Ty->getVectorNumElements(); 910 for (unsigned i = 0; i != NumElts; ++i) { 911 Constant *Elt = Op1C->getAggregateElement(i); 912 if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt))) 913 return UndefValue::get(Ty); 914 } 915 } 916 917 // undef / X -> 0 918 // undef % X -> 0 919 if (match(Op0, m_Undef())) 920 return Constant::getNullValue(Ty); 921 922 // 0 / X -> 0 923 // 0 % X -> 0 924 if (match(Op0, m_Zero())) 925 return Constant::getNullValue(Op0->getType()); 926 927 // X / X -> 1 928 // X % X -> 0 929 if (Op0 == Op1) 930 return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty); 931 932 // X / 1 -> X 933 // X % 1 -> 0 934 // If this is a boolean op (single-bit element type), we can't have 935 // division-by-zero or remainder-by-zero, so assume the divisor is 1. 936 // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1. 937 Value *X; 938 if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) || 939 (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))) 940 return IsDiv ? Op0 : Constant::getNullValue(Ty); 941 942 return nullptr; 943 } 944 945 /// Given a predicate and two operands, return true if the comparison is true. 946 /// This is a helper for div/rem simplification where we return some other value 947 /// when we can prove a relationship between the operands. 948 static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS, 949 const SimplifyQuery &Q, unsigned MaxRecurse) { 950 Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse); 951 Constant *C = dyn_cast_or_null<Constant>(V); 952 return (C && C->isAllOnesValue()); 953 } 954 955 /// Return true if we can simplify X / Y to 0. Remainder can adapt that answer 956 /// to simplify X % Y to X. 957 static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, 958 unsigned MaxRecurse, bool IsSigned) { 959 // Recursion is always used, so bail out at once if we already hit the limit. 960 if (!MaxRecurse--) 961 return false; 962 963 if (IsSigned) { 964 // |X| / |Y| --> 0 965 // 966 // We require that 1 operand is a simple constant. That could be extended to 967 // 2 variables if we computed the sign bit for each. 
968 // 969 // Make sure that a constant is not the minimum signed value because taking 970 // the abs() of that is undefined. 971 Type *Ty = X->getType(); 972 const APInt *C; 973 if (match(X, m_APInt(C)) && !C->isMinSignedValue()) { 974 // Is the variable divisor magnitude always greater than the constant 975 // dividend magnitude? 976 // |Y| > |C| --> Y < -abs(C) or Y > abs(C) 977 Constant *PosDividendC = ConstantInt::get(Ty, C->abs()); 978 Constant *NegDividendC = ConstantInt::get(Ty, -C->abs()); 979 if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) || 980 isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse)) 981 return true; 982 } 983 if (match(Y, m_APInt(C))) { 984 // Special-case: we can't take the abs() of a minimum signed value. If 985 // that's the divisor, then all we have to do is prove that the dividend 986 // is also not the minimum signed value. 987 if (C->isMinSignedValue()) 988 return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse); 989 990 // Is the variable dividend magnitude always less than the constant 991 // divisor magnitude? 992 // |X| < |C| --> X > -abs(C) and X < abs(C) 993 Constant *PosDivisorC = ConstantInt::get(Ty, C->abs()); 994 Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs()); 995 if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) && 996 isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse)) 997 return true; 998 } 999 return false; 1000 } 1001 1002 // IsSigned == false. 1003 // Is the dividend unsigned less than the divisor? 1004 return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse); 1005 } 1006 1007 /// These are simplifications common to SDiv and UDiv. 1008 static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, 1009 const SimplifyQuery &Q, unsigned MaxRecurse) { 1010 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) 1011 return C; 1012 1013 if (Value *V = simplifyDivRem(Op0, Op1, true)) 1014 return V; 1015 1016 bool IsSigned = Opcode == Instruction::SDiv; 1017 1018 // (X * Y) / Y -> X if the multiplication does not overflow. 1019 Value *X; 1020 if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) { 1021 auto *Mul = cast<OverflowingBinaryOperator>(Op0); 1022 // If the Mul does not overflow, then we are good to go. 1023 if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) || 1024 (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul))) 1025 return X; 1026 // If X has the form X = A / Y, then X * Y cannot overflow. 1027 if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) || 1028 (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) 1029 return X; 1030 } 1031 1032 // (X rem Y) / Y -> 0 1033 if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) || 1034 (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1))))) 1035 return Constant::getNullValue(Op0->getType()); 1036 1037 // (X /u C1) /u C2 -> 0 if C1 * C2 overflow 1038 ConstantInt *C1, *C2; 1039 if (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) && 1040 match(Op1, m_ConstantInt(C2))) { 1041 bool Overflow; 1042 (void)C1->getValue().umul_ov(C2->getValue(), Overflow); 1043 if (Overflow) 1044 return Constant::getNullValue(Op0->getType()); 1045 } 1046 1047 // If the operation is with the result of a select instruction, check whether 1048 // operating on either branch of the select always yields the same value. 
1049 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1050 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) 1051 return V; 1052 1053 // If the operation is with the result of a phi instruction, check whether 1054 // operating on all incoming values of the phi always yields the same value. 1055 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1056 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) 1057 return V; 1058 1059 if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned)) 1060 return Constant::getNullValue(Op0->getType()); 1061 1062 return nullptr; 1063 } 1064 1065 /// These are simplifications common to SRem and URem. 1066 static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, 1067 const SimplifyQuery &Q, unsigned MaxRecurse) { 1068 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) 1069 return C; 1070 1071 if (Value *V = simplifyDivRem(Op0, Op1, false)) 1072 return V; 1073 1074 // (X % Y) % Y -> X % Y 1075 if ((Opcode == Instruction::SRem && 1076 match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) || 1077 (Opcode == Instruction::URem && 1078 match(Op0, m_URem(m_Value(), m_Specific(Op1))))) 1079 return Op0; 1080 1081 // (X << Y) % X -> 0 1082 if (Q.IIQ.UseInstrInfo && 1083 ((Opcode == Instruction::SRem && 1084 match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) || 1085 (Opcode == Instruction::URem && 1086 match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))) 1087 return Constant::getNullValue(Op0->getType()); 1088 1089 // If the operation is with the result of a select instruction, check whether 1090 // operating on either branch of the select always yields the same value. 1091 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1092 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) 1093 return V; 1094 1095 // If the operation is with the result of a phi instruction, check whether 1096 // operating on all incoming values of the phi always yields the same value. 1097 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1098 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) 1099 return V; 1100 1101 // If X / Y == 0, then X % Y == X. 1102 if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem)) 1103 return Op0; 1104 1105 return nullptr; 1106 } 1107 1108 /// Given operands for an SDiv, see if we can fold the result. 1109 /// If not, this returns null. 1110 static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1111 unsigned MaxRecurse) { 1112 // If two operands are negated and no signed overflow, return -1. 1113 if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true)) 1114 return Constant::getAllOnesValue(Op0->getType()); 1115 1116 return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse); 1117 } 1118 1119 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1120 return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit); 1121 } 1122 1123 /// Given operands for a UDiv, see if we can fold the result. 1124 /// If not, this returns null. 1125 static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1126 unsigned MaxRecurse) { 1127 return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse); 1128 } 1129 1130 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1131 return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit); 1132 } 1133 1134 /// Given operands for an SRem, see if we can fold the result. 1135 /// If not, this returns null. 
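/// For example, "srem i32 %x, 1" and "srem i32 %x, %x" both fold to 0 (via the
/// shared simplifyDivRem helper).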
1136 static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1137 unsigned MaxRecurse) { 1138 // If the divisor is 0, the result is undefined, so assume the divisor is -1. 1139 // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0 1140 Value *X; 1141 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) 1142 return ConstantInt::getNullValue(Op0->getType()); 1143 1144 // If the two operands are negated, return 0. 1145 if (isKnownNegation(Op0, Op1)) 1146 return ConstantInt::getNullValue(Op0->getType()); 1147 1148 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse); 1149 } 1150 1151 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1152 return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit); 1153 } 1154 1155 /// Given operands for a URem, see if we can fold the result. 1156 /// If not, this returns null. 1157 static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1158 unsigned MaxRecurse) { 1159 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse); 1160 } 1161 1162 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1163 return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit); 1164 } 1165 1166 /// Returns true if a shift by \c Amount always yields undef. 1167 static bool isUndefShift(Value *Amount) { 1168 Constant *C = dyn_cast<Constant>(Amount); 1169 if (!C) 1170 return false; 1171 1172 // X shift by undef -> undef because it may shift by the bitwidth. 1173 if (isa<UndefValue>(C)) 1174 return true; 1175 1176 // Shifting by the bitwidth or more is undefined. 1177 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) 1178 if (CI->getValue().getLimitedValue() >= 1179 CI->getType()->getScalarSizeInBits()) 1180 return true; 1181 1182 // If all lanes of a vector shift are undefined the whole shift is. 1183 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) { 1184 for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I) 1185 if (!isUndefShift(C->getAggregateElement(I))) 1186 return false; 1187 return true; 1188 } 1189 1190 return false; 1191 } 1192 1193 /// Given operands for an Shl, LShr or AShr, see if we can fold the result. 1194 /// If not, this returns null. 1195 static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0, 1196 Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse) { 1197 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) 1198 return C; 1199 1200 // 0 shift by X -> 0 1201 if (match(Op0, m_Zero())) 1202 return Constant::getNullValue(Op0->getType()); 1203 1204 // X shift by 0 -> X 1205 // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones 1206 // would be poison. 1207 Value *X; 1208 if (match(Op1, m_Zero()) || 1209 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))) 1210 return Op0; 1211 1212 // Fold undefined shifts. 1213 if (isUndefShift(Op1)) 1214 return UndefValue::get(Op0->getType()); 1215 1216 // If the operation is with the result of a select instruction, check whether 1217 // operating on either branch of the select always yields the same value. 1218 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1219 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) 1220 return V; 1221 1222 // If the operation is with the result of a phi instruction, check whether 1223 // operating on all incoming values of the phi always yields the same value. 
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (Known.One.getLimitedValue() >= Known.getBitWidth())
    return UndefValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
  if (Known.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  return nullptr;
}

/// Given operands for an LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool isExact, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (match(Op0, m_Undef()))
    return isExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  if (isExact) {
    KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
    return V;

  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (match(Op0, m_Undef()))
    return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());

  // (X >> A) << A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo &&
      match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;

  // shl nuw i8 C, %x -> C iff C has sign bit set.
  if (isNUW && match(Op0, m_Negative()))
    return Op0;
  // NOTE: could use computeKnownBits() / LazyValueInfo,
  // but the cost-benefit analysis suggests it isn't worth it.

  return nullptr;
}

Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}

/// Given operands for an LShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // (X << A) >> A -> X
  Value *X;
  if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
  // We can return X as we do in the above case since OR alters no bits in X.
1317 // SimplifyDemandedBits in InstCombine can do more general optimization for 1318 // bit manipulation. This pattern aims to provide opportunities for other 1319 // optimizers by supporting a simple but common case in InstSimplify. 1320 Value *Y; 1321 const APInt *ShRAmt, *ShLAmt; 1322 if (match(Op1, m_APInt(ShRAmt)) && 1323 match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) && 1324 *ShRAmt == *ShLAmt) { 1325 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 1326 const unsigned Width = Op0->getType()->getScalarSizeInBits(); 1327 const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros(); 1328 if (ShRAmt->uge(EffWidthY)) 1329 return X; 1330 } 1331 1332 return nullptr; 1333 } 1334 1335 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, 1336 const SimplifyQuery &Q) { 1337 return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit); 1338 } 1339 1340 /// Given operands for an AShr, see if we can fold the result. 1341 /// If not, this returns null. 1342 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, 1343 const SimplifyQuery &Q, unsigned MaxRecurse) { 1344 if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q, 1345 MaxRecurse)) 1346 return V; 1347 1348 // all ones >>a X -> -1 1349 // Do not return Op0 because it may contain undef elements if it's a vector. 1350 if (match(Op0, m_AllOnes())) 1351 return Constant::getAllOnesValue(Op0->getType()); 1352 1353 // (X << A) >> A -> X 1354 Value *X; 1355 if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1)))) 1356 return X; 1357 1358 // Arithmetic shifting an all-sign-bit value is a no-op. 1359 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 1360 if (NumSignBits == Op0->getType()->getScalarSizeInBits()) 1361 return Op0; 1362 1363 return nullptr; 1364 } 1365 1366 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, 1367 const SimplifyQuery &Q) { 1368 return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit); 1369 } 1370 1371 /// Commuted variants are assumed to be handled by calling this function again 1372 /// with the parameters swapped. 1373 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, 1374 ICmpInst *UnsignedICmp, bool IsAnd, 1375 const SimplifyQuery &Q) { 1376 Value *X, *Y; 1377 1378 ICmpInst::Predicate EqPred; 1379 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) || 1380 !ICmpInst::isEquality(EqPred)) 1381 return nullptr; 1382 1383 ICmpInst::Predicate UnsignedPred; 1384 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) && 1385 ICmpInst::isUnsigned(UnsignedPred)) 1386 ; 1387 else if (match(UnsignedICmp, 1388 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) && 1389 ICmpInst::isUnsigned(UnsignedPred)) 1390 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred); 1391 else 1392 return nullptr; 1393 1394 // X < Y && Y != 0 --> X < Y 1395 // X < Y || Y != 0 --> Y != 0 1396 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE) 1397 return IsAnd ? UnsignedICmp : ZeroICmp; 1398 1399 // X <= Y && Y != 0 --> X <= Y iff X != 0 1400 // X <= Y || Y != 0 --> Y != 0 iff X != 0 1401 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE && 1402 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1403 return IsAnd ? 
UnsignedICmp : ZeroICmp; 1404 1405 // X > Y && Y == 0 --> Y == 0 iff X != 0 1406 // X > Y || Y == 0 --> X > Y iff X != 0 1407 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ && 1408 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1409 return IsAnd ? ZeroICmp : UnsignedICmp; 1410 1411 // X >= Y || Y != 0 --> true 1412 // X >= Y || Y == 0 --> X >= Y 1413 if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd) { 1414 if (EqPred == ICmpInst::ICMP_NE) 1415 return getTrue(UnsignedICmp->getType()); 1416 return UnsignedICmp; 1417 } 1418 1419 // X < Y && Y == 0 --> false 1420 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ && 1421 IsAnd) 1422 return getFalse(UnsignedICmp->getType()); 1423 1424 return nullptr; 1425 } 1426 1427 /// Commuted variants are assumed to be handled by calling this function again 1428 /// with the parameters swapped. 1429 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { 1430 ICmpInst::Predicate Pred0, Pred1; 1431 Value *A ,*B; 1432 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || 1433 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) 1434 return nullptr; 1435 1436 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B). 1437 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we 1438 // can eliminate Op1 from this 'and'. 1439 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) 1440 return Op0; 1441 1442 // Check for any combination of predicates that are guaranteed to be disjoint. 1443 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || 1444 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) || 1445 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) || 1446 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)) 1447 return getFalse(Op0->getType()); 1448 1449 return nullptr; 1450 } 1451 1452 /// Commuted variants are assumed to be handled by calling this function again 1453 /// with the parameters swapped. 1454 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { 1455 ICmpInst::Predicate Pred0, Pred1; 1456 Value *A ,*B; 1457 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || 1458 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) 1459 return nullptr; 1460 1461 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B). 1462 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we 1463 // can eliminate Op0 from this 'or'. 1464 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) 1465 return Op1; 1466 1467 // Check for any combination of predicates that cover the entire range of 1468 // possibilities. 1469 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || 1470 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) || 1471 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) || 1472 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE)) 1473 return getTrue(Op0->getType()); 1474 1475 return nullptr; 1476 } 1477 1478 /// Test if a pair of compares with a shared operand and 2 constants has an 1479 /// empty set intersection, full set union, or if one compare is a superset of 1480 /// the other. 1481 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, 1482 bool IsAnd) { 1483 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)). 
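  // For example, "(icmp slt X, 0) && (icmp sgt X, 0)" has an empty intersection
  // and folds to false, while "(icmp sle X, 0) || (icmp sge X, 0)" covers the
  // full range and folds to true.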
1484 if (Cmp0->getOperand(0) != Cmp1->getOperand(0)) 1485 return nullptr; 1486 1487 const APInt *C0, *C1; 1488 if (!match(Cmp0->getOperand(1), m_APInt(C0)) || 1489 !match(Cmp1->getOperand(1), m_APInt(C1))) 1490 return nullptr; 1491 1492 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0); 1493 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1); 1494 1495 // For and-of-compares, check if the intersection is empty: 1496 // (icmp X, C0) && (icmp X, C1) --> empty set --> false 1497 if (IsAnd && Range0.intersectWith(Range1).isEmptySet()) 1498 return getFalse(Cmp0->getType()); 1499 1500 // For or-of-compares, check if the union is full: 1501 // (icmp X, C0) || (icmp X, C1) --> full set --> true 1502 if (!IsAnd && Range0.unionWith(Range1).isFullSet()) 1503 return getTrue(Cmp0->getType()); 1504 1505 // Is one range a superset of the other? 1506 // If this is and-of-compares, take the smaller set: 1507 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42 1508 // If this is or-of-compares, take the larger set: 1509 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4 1510 if (Range0.contains(Range1)) 1511 return IsAnd ? Cmp1 : Cmp0; 1512 if (Range1.contains(Range0)) 1513 return IsAnd ? Cmp0 : Cmp1; 1514 1515 return nullptr; 1516 } 1517 1518 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1, 1519 bool IsAnd) { 1520 ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate(); 1521 if (!match(Cmp0->getOperand(1), m_Zero()) || 1522 !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1) 1523 return nullptr; 1524 1525 if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ)) 1526 return nullptr; 1527 1528 // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)". 1529 Value *X = Cmp0->getOperand(0); 1530 Value *Y = Cmp1->getOperand(0); 1531 1532 // If one of the compares is a masked version of a (not) null check, then 1533 // that compare implies the other, so we eliminate the other. Optionally, look 1534 // through a pointer-to-int cast to match a null check of a pointer type. 1535 1536 // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0 1537 // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0 1538 // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0 1539 // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0 1540 if (match(Y, m_c_And(m_Specific(X), m_Value())) || 1541 match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value()))) 1542 return Cmp1; 1543 1544 // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0 1545 // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0 1546 // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0 1547 // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? 
& [ptrtoint] Y) != 0 1548 if (match(X, m_c_And(m_Specific(Y), m_Value())) || 1549 match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value()))) 1550 return Cmp0; 1551 1552 return nullptr; 1553 } 1554 1555 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, 1556 const InstrInfoQuery &IIQ) { 1557 // (icmp (add V, C0), C1) & (icmp V, C0) 1558 ICmpInst::Predicate Pred0, Pred1; 1559 const APInt *C0, *C1; 1560 Value *V; 1561 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1562 return nullptr; 1563 1564 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1565 return nullptr; 1566 1567 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0)); 1568 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1569 return nullptr; 1570 1571 Type *ITy = Op0->getType(); 1572 bool isNSW = IIQ.hasNoSignedWrap(AddInst); 1573 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); 1574 1575 const APInt Delta = *C1 - *C0; 1576 if (C0->isStrictlyPositive()) { 1577 if (Delta == 2) { 1578 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT) 1579 return getFalse(ITy); 1580 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1581 return getFalse(ITy); 1582 } 1583 if (Delta == 1) { 1584 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT) 1585 return getFalse(ITy); 1586 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1587 return getFalse(ITy); 1588 } 1589 } 1590 if (C0->getBoolValue() && isNUW) { 1591 if (Delta == 2) 1592 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT) 1593 return getFalse(ITy); 1594 if (Delta == 1) 1595 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT) 1596 return getFalse(ITy); 1597 } 1598 1599 return nullptr; 1600 } 1601 1602 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, 1603 const SimplifyQuery &Q) { 1604 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q)) 1605 return X; 1606 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q)) 1607 return X; 1608 1609 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1)) 1610 return X; 1611 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0)) 1612 return X; 1613 1614 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true)) 1615 return X; 1616 1617 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true)) 1618 return X; 1619 1620 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ)) 1621 return X; 1622 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ)) 1623 return X; 1624 1625 return nullptr; 1626 } 1627 1628 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, 1629 const InstrInfoQuery &IIQ) { 1630 // (icmp (add V, C0), C1) | (icmp V, C0) 1631 ICmpInst::Predicate Pred0, Pred1; 1632 const APInt *C0, *C1; 1633 Value *V; 1634 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1635 return nullptr; 1636 1637 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1638 return nullptr; 1639 1640 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); 1641 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1642 return nullptr; 1643 1644 Type *ITy = Op0->getType(); 1645 bool isNSW = IIQ.hasNoSignedWrap(AddInst); 1646 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); 1647 1648 const APInt Delta = *C1 - *C0; 1649 if (C0->isStrictlyPositive()) { 1650 if (Delta == 2) { 1651 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE) 1652 return getTrue(ITy); 1653 if (Pred0 == ICmpInst::ICMP_SGE && 
Pred1 == ICmpInst::ICMP_SLE && isNSW) 1654 return getTrue(ITy); 1655 } 1656 if (Delta == 1) { 1657 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE) 1658 return getTrue(ITy); 1659 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1660 return getTrue(ITy); 1661 } 1662 } 1663 if (C0->getBoolValue() && isNUW) { 1664 if (Delta == 2) 1665 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE) 1666 return getTrue(ITy); 1667 if (Delta == 1) 1668 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE) 1669 return getTrue(ITy); 1670 } 1671 1672 return nullptr; 1673 } 1674 1675 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, 1676 const SimplifyQuery &Q) { 1677 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q)) 1678 return X; 1679 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q)) 1680 return X; 1681 1682 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1)) 1683 return X; 1684 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0)) 1685 return X; 1686 1687 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false)) 1688 return X; 1689 1690 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false)) 1691 return X; 1692 1693 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ)) 1694 return X; 1695 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ)) 1696 return X; 1697 1698 return nullptr; 1699 } 1700 1701 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI, 1702 FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) { 1703 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1); 1704 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1); 1705 if (LHS0->getType() != RHS0->getType()) 1706 return nullptr; 1707 1708 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 1709 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) || 1710 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) { 1711 // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y 1712 // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X 1713 // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y 1714 // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X 1715 // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y 1716 // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X 1717 // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y 1718 // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X 1719 if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) || 1720 (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1))) 1721 return RHS; 1722 1723 // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y 1724 // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X 1725 // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y 1726 // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X 1727 // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y 1728 // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X 1729 // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y 1730 // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X 1731 if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) || 1732 (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1))) 1733 return LHS; 1734 } 1735 1736 return nullptr; 1737 } 1738 1739 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, 1740 Value *Op0, Value *Op1, bool IsAnd) { 1741 // Look through casts of the 'and' operands 
to find compares. 1742 auto *Cast0 = dyn_cast<CastInst>(Op0); 1743 auto *Cast1 = dyn_cast<CastInst>(Op1); 1744 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() && 1745 Cast0->getSrcTy() == Cast1->getSrcTy()) { 1746 Op0 = Cast0->getOperand(0); 1747 Op1 = Cast1->getOperand(0); 1748 } 1749 1750 Value *V = nullptr; 1751 auto *ICmp0 = dyn_cast<ICmpInst>(Op0); 1752 auto *ICmp1 = dyn_cast<ICmpInst>(Op1); 1753 if (ICmp0 && ICmp1) 1754 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q) 1755 : simplifyOrOfICmps(ICmp0, ICmp1, Q); 1756 1757 auto *FCmp0 = dyn_cast<FCmpInst>(Op0); 1758 auto *FCmp1 = dyn_cast<FCmpInst>(Op1); 1759 if (FCmp0 && FCmp1) 1760 V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd); 1761 1762 if (!V) 1763 return nullptr; 1764 if (!Cast0) 1765 return V; 1766 1767 // If we looked through casts, we can only handle a constant simplification 1768 // because we are not allowed to create a cast instruction here. 1769 if (auto *C = dyn_cast<Constant>(V)) 1770 return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType()); 1771 1772 return nullptr; 1773 } 1774 1775 /// Check that the Op1 is in expected form, i.e.: 1776 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???) 1777 /// %Op1 = extractvalue { i4, i1 } %Agg, 1 1778 static bool omitCheckForZeroBeforeMulWithOverflowInternal(Value *Op1, 1779 Value *X) { 1780 auto *Extract = dyn_cast<ExtractValueInst>(Op1); 1781 // We should only be extracting the overflow bit. 1782 if (!Extract || !Extract->getIndices().equals(1)) 1783 return false; 1784 Value *Agg = Extract->getAggregateOperand(); 1785 // This should be a multiplication-with-overflow intrinsic. 1786 if (!match(Agg, m_CombineOr(m_Intrinsic<Intrinsic::umul_with_overflow>(), 1787 m_Intrinsic<Intrinsic::smul_with_overflow>()))) 1788 return false; 1789 // One of its multipliers should be the value we checked for zero before. 1790 if (!match(Agg, m_CombineOr(m_Argument<0>(m_Specific(X)), 1791 m_Argument<1>(m_Specific(X))))) 1792 return false; 1793 return true; 1794 } 1795 1796 /// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some 1797 /// other form of check, e.g. one that was using division; it may have been 1798 /// guarded against division-by-zero. We can drop that check now. 1799 /// Look for: 1800 /// %Op0 = icmp ne i4 %X, 0 1801 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???) 1802 /// %Op1 = extractvalue { i4, i1 } %Agg, 1 1803 /// %??? = and i1 %Op0, %Op1 1804 /// We can just return %Op1 1805 static Value *omitCheckForZeroBeforeMulWithOverflow(Value *Op0, Value *Op1) { 1806 ICmpInst::Predicate Pred; 1807 Value *X; 1808 if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) || 1809 Pred != ICmpInst::Predicate::ICMP_NE) 1810 return nullptr; 1811 // Is Op1 in expected form? 1812 if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X)) 1813 return nullptr; 1814 // Can omit 'and', and just return the overflow bit. 1815 return Op1; 1816 } 1817 1818 /// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some 1819 /// other form of check, e.g. one that was using division; it may have been 1820 /// guarded against division-by-zero. We can drop that check now. 1821 /// Look for: 1822 /// %Op0 = icmp eq i4 %X, 0 1823 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???) 
1824 /// %Op1 = extractvalue { i4, i1 } %Agg, 1
1825 /// %NotOp1 = xor i1 %Op1, true
1826 /// %or = or i1 %Op0, %NotOp1
1827 /// We can just return %NotOp1
1828 static Value *omitCheckForZeroBeforeInvertedMulWithOverflow(Value *Op0,
1829                                                             Value *NotOp1) {
1830   ICmpInst::Predicate Pred;
1831   Value *X;
1832   if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
1833       Pred != ICmpInst::Predicate::ICMP_EQ)
1834     return nullptr;
1835   // We expect the other operand of the 'or' to be a 'not'.
1836   Value *Op1;
1837   if (!match(NotOp1, m_Not(m_Value(Op1))))
1838     return nullptr;
1839   // Is Op1 in expected form?
1840   if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
1841     return nullptr;
1842   // Can omit the 'or', and just return the inverted overflow bit.
1843   return NotOp1;
1844 }
1845 
1846 /// Given operands for an And, see if we can fold the result.
1847 /// If not, this returns null.
1848 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1849                               unsigned MaxRecurse) {
1850   if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
1851     return C;
1852 
1853   // X & undef -> 0
1854   if (match(Op1, m_Undef()))
1855     return Constant::getNullValue(Op0->getType());
1856 
1857   // X & X = X
1858   if (Op0 == Op1)
1859     return Op0;
1860 
1861   // X & 0 = 0
1862   if (match(Op1, m_Zero()))
1863     return Constant::getNullValue(Op0->getType());
1864 
1865   // X & -1 = X
1866   if (match(Op1, m_AllOnes()))
1867     return Op0;
1868 
1869   // A & ~A = ~A & A = 0
1870   if (match(Op0, m_Not(m_Specific(Op1))) ||
1871       match(Op1, m_Not(m_Specific(Op0))))
1872     return Constant::getNullValue(Op0->getType());
1873 
1874   // (A | ?) & A = A
1875   if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
1876     return Op1;
1877 
1878   // A & (A | ?) = A
1879   if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
1880     return Op0;
1881 
1882   // A mask that only clears known zeros of a shifted value is a no-op.
1883   Value *X;
1884   const APInt *Mask;
1885   const APInt *ShAmt;
1886   if (match(Op1, m_APInt(Mask))) {
1887     // If all bits in the inverted and shifted mask are clear:
1888     // and (shl X, ShAmt), Mask --> shl X, ShAmt
1889     if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
1890         (~(*Mask)).lshr(*ShAmt).isNullValue())
1891       return Op0;
1892 
1893     // If all bits in the inverted and shifted mask are clear:
1894     // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
1895     if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
1896         (~(*Mask)).shl(*ShAmt).isNullValue())
1897       return Op0;
1898   }
1899 
1900   // If we have a multiplication overflow check that is being 'and'ed with a
1901   // check that one of the multipliers is not zero, we can omit the 'and', and
1902   // only keep the overflow check.
1903   if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op0, Op1))
1904     return V;
1905   if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op1, Op0))
1906     return V;
1907 
1908   // A & (-A) = A if A is a power of two or zero.
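  // For example, with i8 A = 8: 8 & -8 is 0b00001000 & 0b11111000 = 8.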
1909 if (match(Op0, m_Neg(m_Specific(Op1))) || 1910 match(Op1, m_Neg(m_Specific(Op0)))) { 1911 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 1912 Q.DT)) 1913 return Op0; 1914 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 1915 Q.DT)) 1916 return Op1; 1917 } 1918 1919 // This is a similar pattern used for checking if a value is a power-of-2: 1920 // (A - 1) & A --> 0 (if A is a power-of-2 or 0) 1921 // A & (A - 1) --> 0 (if A is a power-of-2 or 0) 1922 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) && 1923 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) 1924 return Constant::getNullValue(Op1->getType()); 1925 if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) && 1926 isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) 1927 return Constant::getNullValue(Op0->getType()); 1928 1929 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true)) 1930 return V; 1931 1932 // Try some generic simplifications for associative operations. 1933 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, 1934 MaxRecurse)) 1935 return V; 1936 1937 // And distributes over Or. Try some generic simplifications based on this. 1938 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or, 1939 Q, MaxRecurse)) 1940 return V; 1941 1942 // And distributes over Xor. Try some generic simplifications based on this. 1943 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor, 1944 Q, MaxRecurse)) 1945 return V; 1946 1947 // If the operation is with the result of a select instruction, check whether 1948 // operating on either branch of the select always yields the same value. 1949 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1950 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q, 1951 MaxRecurse)) 1952 return V; 1953 1954 // If the operation is with the result of a phi instruction, check whether 1955 // operating on all incoming values of the phi always yields the same value. 1956 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1957 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q, 1958 MaxRecurse)) 1959 return V; 1960 1961 // Assuming the effective width of Y is not larger than A, i.e. all bits 1962 // from X and Y are disjoint in (X << A) | Y, 1963 // if the mask of this AND op covers all bits of X or Y, while it covers 1964 // no bits from the other, we can bypass this AND op. E.g., 1965 // ((X << A) | Y) & Mask -> Y, 1966 // if Mask = ((1 << effective_width_of(Y)) - 1) 1967 // ((X << A) | Y) & Mask -> X << A, 1968 // if Mask = ((1 << effective_width_of(X)) - 1) << A 1969 // SimplifyDemandedBits in InstCombine can optimize the general case. 1970 // This pattern aims to help other passes for a common case. 
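  // Illustrative i8 case, assuming the shl is nuw and the top 4 bits of Y are
  // known zero (so Y's effective width is at most the shift amount of 4):
  //   ((X << 4) | Y) & 0x0f --> Y
  //   ((X << 4) | Y) & 0xf0 --> X << 4   (when X's effective width is <= 4)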
1971 Value *Y, *XShifted; 1972 if (match(Op1, m_APInt(Mask)) && 1973 match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)), 1974 m_Value(XShifted)), 1975 m_Value(Y)))) { 1976 const unsigned Width = Op0->getType()->getScalarSizeInBits(); 1977 const unsigned ShftCnt = ShAmt->getLimitedValue(Width); 1978 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 1979 const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros(); 1980 if (EffWidthY <= ShftCnt) { 1981 const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI, 1982 Q.DT); 1983 const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros(); 1984 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY); 1985 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt; 1986 // If the mask is extracting all bits from X or Y as is, we can skip 1987 // this AND op. 1988 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask)) 1989 return Y; 1990 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask)) 1991 return XShifted; 1992 } 1993 } 1994 1995 return nullptr; 1996 } 1997 1998 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1999 return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit); 2000 } 2001 2002 /// Given operands for an Or, see if we can fold the result. 2003 /// If not, this returns null. 2004 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 2005 unsigned MaxRecurse) { 2006 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q)) 2007 return C; 2008 2009 // X | undef -> -1 2010 // X | -1 = -1 2011 // Do not return Op1 because it may contain undef elements if it's a vector. 2012 if (match(Op1, m_Undef()) || match(Op1, m_AllOnes())) 2013 return Constant::getAllOnesValue(Op0->getType()); 2014 2015 // X | X = X 2016 // X | 0 = X 2017 if (Op0 == Op1 || match(Op1, m_Zero())) 2018 return Op0; 2019 2020 // A | ~A = ~A | A = -1 2021 if (match(Op0, m_Not(m_Specific(Op1))) || 2022 match(Op1, m_Not(m_Specific(Op0)))) 2023 return Constant::getAllOnesValue(Op0->getType()); 2024 2025 // (A & ?) | A = A 2026 if (match(Op0, m_c_And(m_Specific(Op1), m_Value()))) 2027 return Op1; 2028 2029 // A | (A & ?) = A 2030 if (match(Op1, m_c_And(m_Specific(Op0), m_Value()))) 2031 return Op0; 2032 2033 // ~(A & ?) | A = -1 2034 if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value())))) 2035 return Constant::getAllOnesValue(Op1->getType()); 2036 2037 // A | ~(A & ?) = -1 2038 if (match(Op1, m_Not(m_c_And(m_Specific(Op1), m_Value())))) 2039 return Constant::getAllOnesValue(Op0->getType()); 2040 2041 Value *A, *B; 2042 // (A & ~B) | (A ^ B) -> (A ^ B) 2043 // (~B & A) | (A ^ B) -> (A ^ B) 2044 // (A & ~B) | (B ^ A) -> (B ^ A) 2045 // (~B & A) | (B ^ A) -> (B ^ A) 2046 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && 2047 (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) || 2048 match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))) 2049 return Op1; 2050 2051 // Commute the 'or' operands. 
2052   // (A ^ B) | (A & ~B) -> (A ^ B)
2053   // (A ^ B) | (~B & A) -> (A ^ B)
2054   // (B ^ A) | (A & ~B) -> (B ^ A)
2055   // (B ^ A) | (~B & A) -> (B ^ A)
2056   if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2057       (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2058        match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2059     return Op0;
2060 
2061   // (A & B) | (~A ^ B) -> (~A ^ B)
2062   // (B & A) | (~A ^ B) -> (~A ^ B)
2063   // (A & B) | (B ^ ~A) -> (B ^ ~A)
2064   // (B & A) | (B ^ ~A) -> (B ^ ~A)
2065   if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2066       (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2067        match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2068     return Op1;
2069 
2070   // (~A ^ B) | (A & B) -> (~A ^ B)
2071   // (~A ^ B) | (B & A) -> (~A ^ B)
2072   // (B ^ ~A) | (A & B) -> (B ^ ~A)
2073   // (B ^ ~A) | (B & A) -> (B ^ ~A)
2074   if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
2075       (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2076        match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2077     return Op0;
2078 
2079   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2080     return V;
2081 
2082   // If we have a negated multiplication overflow check that is being 'or'ed
2083   // with a check that one of the multipliers is zero, we can omit the 'or',
2084   // and only keep the negated overflow check.
2085   if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op0, Op1))
2086     return V;
2087   if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op1, Op0))
2088     return V;
2089 
2090   // Try some generic simplifications for associative operations.
2091   if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
2092                                           MaxRecurse))
2093     return V;
2094 
2095   // Or distributes over And. Try some generic simplifications based on this.
2096   if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
2097                              MaxRecurse))
2098     return V;
2099 
2100   // If the operation is with the result of a select instruction, check whether
2101   // operating on either branch of the select always yields the same value.
2102   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
2103     if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
2104                                          MaxRecurse))
2105       return V;
2106 
2107   // (A & C1)|(B & C2)
2108   const APInt *C1, *C2;
2109   if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2110       match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2111     if (*C1 == ~*C2) {
2112       // (A & C1)|(B & C2)
2113       // If we have: ((V + N) & C1) | (V & C2)
2114       // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2115       // replace with V+N.
2116       Value *N;
2117       if (C2->isMask() && // C2 == 0+1+
2118           match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2119         // Add commutes, try both ways.
2120         if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2121           return A;
2122       }
2123       // Or commutes, try both ways.
2124       if (C1->isMask() &&
2125           match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2126         // Add commutes, try both ways.
2127         if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2128           return B;
2129       }
2130     }
2131   }
2132 
2133   // If the operation is with the result of a phi instruction, check whether
2134   // operating on all incoming values of the phi always yields the same value.
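  // For example, with %p = phi i8 [ 0, %bb0 ], [ %x, %bb1 ], the expression
  // (or %p, %x) yields %x for both incoming values, so it can fold to %x
  // (provided %x is available at the phi).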
2135 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 2136 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse)) 2137 return V; 2138 2139 return nullptr; 2140 } 2141 2142 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 2143 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit); 2144 } 2145 2146 /// Given operands for a Xor, see if we can fold the result. 2147 /// If not, this returns null. 2148 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 2149 unsigned MaxRecurse) { 2150 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q)) 2151 return C; 2152 2153 // A ^ undef -> undef 2154 if (match(Op1, m_Undef())) 2155 return Op1; 2156 2157 // A ^ 0 = A 2158 if (match(Op1, m_Zero())) 2159 return Op0; 2160 2161 // A ^ A = 0 2162 if (Op0 == Op1) 2163 return Constant::getNullValue(Op0->getType()); 2164 2165 // A ^ ~A = ~A ^ A = -1 2166 if (match(Op0, m_Not(m_Specific(Op1))) || 2167 match(Op1, m_Not(m_Specific(Op0)))) 2168 return Constant::getAllOnesValue(Op0->getType()); 2169 2170 // Try some generic simplifications for associative operations. 2171 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, 2172 MaxRecurse)) 2173 return V; 2174 2175 // Threading Xor over selects and phi nodes is pointless, so don't bother. 2176 // Threading over the select in "A ^ select(cond, B, C)" means evaluating 2177 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and 2178 // only if B and C are equal. If B and C are equal then (since we assume 2179 // that operands have already been simplified) "select(cond, B, C)" should 2180 // have been simplified to the common value of B and C already. Analysing 2181 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly 2182 // for threading over phi nodes. 2183 2184 return nullptr; 2185 } 2186 2187 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 2188 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit); 2189 } 2190 2191 2192 static Type *GetCompareTy(Value *Op) { 2193 return CmpInst::makeCmpResultType(Op->getType()); 2194 } 2195 2196 /// Rummage around inside V looking for something equivalent to the comparison 2197 /// "LHS Pred RHS". Return such a value if found, otherwise return null. 2198 /// Helper function for analyzing max/min idioms. 2199 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, 2200 Value *LHS, Value *RHS) { 2201 SelectInst *SI = dyn_cast<SelectInst>(V); 2202 if (!SI) 2203 return nullptr; 2204 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 2205 if (!Cmp) 2206 return nullptr; 2207 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1); 2208 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS) 2209 return Cmp; 2210 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) && 2211 LHS == CmpRHS && RHS == CmpLHS) 2212 return Cmp; 2213 return nullptr; 2214 } 2215 2216 // A significant optimization not implemented here is assuming that alloca 2217 // addresses are not equal to incoming argument values. They don't *alias*, 2218 // as we say, but that doesn't mean they aren't equal, so we take a 2219 // conservative approach. 2220 // 2221 // This is inspired in part by C++11 5.10p1: 2222 // "Two pointers of the same type compare equal if and only if they are both 2223 // null, both point to the same function, or both represent the same 2224 // address." 2225 // 2226 // This is pretty permissive. 
2227 //
2228 // It's also partly due to C11 6.5.9p6:
2229 // "Two pointers compare equal if and only if both are null pointers, both are
2230 // pointers to the same object (including a pointer to an object and a
2231 // subobject at its beginning) or function, both are pointers to one past the
2232 // last element of the same array object, or one is a pointer to one past the
2233 // end of one array object and the other is a pointer to the start of a
2234 // different array object that happens to immediately follow the first array
2235 // object in the address space."
2236 //
2237 // C11's version is more restrictive; however, there's no reason why an argument
2238 // couldn't be a one-past-the-end value for a stack object in the caller and be
2239 // equal to the beginning of a stack object in the callee.
2240 //
2241 // If the C and C++ standards are ever made sufficiently restrictive in this
2242 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2243 // this optimization.
2244 static Constant *
2245 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2246                    const DominatorTree *DT, CmpInst::Predicate Pred,
2247                    AssumptionCache *AC, const Instruction *CxtI,
2248                    const InstrInfoQuery &IIQ, Value *LHS, Value *RHS) {
2249   // First, skip past any trivial no-ops.
2250   LHS = LHS->stripPointerCasts();
2251   RHS = RHS->stripPointerCasts();
2252 
2253   // A non-null pointer is not equal to a null pointer.
2254   if (llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
2255                            IIQ.UseInstrInfo) &&
2256       isa<ConstantPointerNull>(RHS) &&
2257       (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
2258     return ConstantInt::get(GetCompareTy(LHS),
2259                             !CmpInst::isTrueWhenEqual(Pred));
2260 
2261   // We can only fold certain predicates on pointer comparisons.
2262   switch (Pred) {
2263   default:
2264     return nullptr;
2265 
2266     // Equality comparisons are easy to fold.
2267   case CmpInst::ICMP_EQ:
2268   case CmpInst::ICMP_NE:
2269     break;
2270 
2271     // We can only handle unsigned relational comparisons because 'inbounds' on
2272     // a GEP only protects against unsigned wrapping.
2273   case CmpInst::ICMP_UGT:
2274   case CmpInst::ICMP_UGE:
2275   case CmpInst::ICMP_ULT:
2276   case CmpInst::ICMP_ULE:
2277     // However, we have to switch them to their signed variants to handle
2278     // negative indices from the base pointer.
2279     Pred = ICmpInst::getSignedPredicate(Pred);
2280     break;
2281   }
2282 
2283   // Strip off any constant offsets so that we can reason about them.
2284   // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2285   // here and compare base addresses like AliasAnalysis does; however, there are
2286   // numerous hazards. AliasAnalysis and its utilities rely on special rules
2287   // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2288   // doesn't need to guarantee pointer inequality when it says NoAlias.
2289   Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2290   Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2291 
2292   // If LHS and RHS are related via constant offsets to the same base
2293   // value, we can fold the comparison to an icmp of just the offsets.
2294   if (LHS == RHS)
2295     return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2296 
2297   // Various optimizations for (in)equality comparisons.
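  // For example, an icmp eq of an alloca and a global variable (with offsets
  // that stay within their respective allocations) folds to false below: a
  // live stack allocation and a global cannot share an address.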
2298   if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2299     // Different non-empty allocations that exist at the same time have
2300     // different addresses (if the program can tell). Global variables always
2301     // exist, so they always exist during the lifetime of each other and all
2302     // allocas. Two different allocas usually have different addresses...
2303     //
2304     // However, if there's an @llvm.stackrestore dynamically in between two
2305     // allocas, they may have the same address. It's tempting to reduce the
2306     // scope of the problem by only looking at *static* allocas here. That would
2307     // cover the majority of allocas while significantly reducing the likelihood
2308     // of having an @llvm.stackrestore pop up in the middle. However, it's not
2309     // actually impossible for an @llvm.stackrestore to pop up in the middle of
2310     // an entry block. Also, if we have a block that's not attached to a
2311     // function, we can't tell if it's "static" under the current definition.
2312     // Theoretically, this problem could be fixed by creating a new kind of
2313     // instruction specifically for static allocas. Such a new instruction
2314     // could be required to be at the top of the entry block, thus preventing it
2315     // from being subject to a @llvm.stackrestore. Instcombine could even
2316     // convert regular allocas into these special allocas. It'd be nifty.
2317     // However, until then, this problem remains open.
2318     //
2319     // So, we'll assume that two non-empty allocas have different addresses
2320     // for now.
2321     //
2322     // With all that, if the offsets are within the bounds of their allocations
2323     // (and not one-past-the-end! so we can't use inbounds!), and their
2324     // allocations aren't the same, the pointers are not equal.
2325     //
2326     // Note that it's not necessary to check for LHS being a global variable
2327     // address, due to canonicalization and constant folding.
2328     if (isa<AllocaInst>(LHS) &&
2329         (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2330       ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2331       ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2332       uint64_t LHSSize, RHSSize;
2333       ObjectSizeOpts Opts;
2334       Opts.NullIsUnknownSize =
2335           NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
2336       if (LHSOffsetCI && RHSOffsetCI &&
2337           getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2338           getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2339         const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2340         const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2341         if (!LHSOffsetValue.isNegative() &&
2342             !RHSOffsetValue.isNegative() &&
2343             LHSOffsetValue.ult(LHSSize) &&
2344             RHSOffsetValue.ult(RHSSize)) {
2345           return ConstantInt::get(GetCompareTy(LHS),
2346                                   !CmpInst::isTrueWhenEqual(Pred));
2347         }
2348       }
2349 
2350       // Repeat the above check but this time without depending on DataLayout
2351       // or being able to compute a precise size.
2352       if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2353           !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2354           LHSOffset->isNullValue() &&
2355           RHSOffset->isNullValue())
2356         return ConstantInt::get(GetCompareTy(LHS),
2357                                 !CmpInst::isTrueWhenEqual(Pred));
2358     }
2359 
2360     // Even if a non-inbounds GEP occurs along the path we can still optimize
2361     // equality comparisons concerning the result. We avoid walking the whole
2362     // chain again by starting where the last calls to
2363     // stripAndComputeConstantOffsets left off and accumulating the offsets.
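    // For example, two GEPs from the same base with accumulated constant
    // offsets 3 and 5 still fold: icmp eq compares 3 with 5 and yields false
    // even if one GEP in the chain was not inbounds.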
2364     Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2365     Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2366     if (LHS == RHS)
2367       return ConstantExpr::getICmp(Pred,
2368                                    ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2369                                    ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2370 
2371     // If one side of the equality comparison must come from a noalias call
2372     // (meaning a system memory allocation function), and the other side must
2373     // come from a pointer that cannot overlap with dynamically-allocated
2374     // memory within the lifetime of the current function (allocas, byval
2375     // arguments, globals), then determine the comparison result here.
2376     SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2377     GetUnderlyingObjects(LHS, LHSUObjs, DL);
2378     GetUnderlyingObjects(RHS, RHSUObjs, DL);
2379 
2380     // Is the set of underlying objects all noalias calls?
2381     auto IsNAC = [](ArrayRef<const Value *> Objects) {
2382       return all_of(Objects, isNoAliasCall);
2383     };
2384 
2385     // Is the set of underlying objects all things which must be disjoint from
2386     // noalias calls? For allocas, we consider only static ones (dynamic
2387     // allocas might be transformed into calls to malloc not simultaneously
2388     // live with the compared-to allocation). For globals, we exclude symbols
2389     // that might be resolved lazily to symbols in another dynamically-loaded
2390     // library (and, thus, could be malloc'ed by the implementation).
2391     auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2392       return all_of(Objects, [](const Value *V) {
2393         if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2394           return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2395         if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2396           return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2397                   GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2398                  !GV->isThreadLocal();
2399         if (const Argument *A = dyn_cast<Argument>(V))
2400           return A->hasByValAttr();
2401         return false;
2402       });
2403     };
2404 
2405     if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2406         (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2407       return ConstantInt::get(GetCompareTy(LHS),
2408                               !CmpInst::isTrueWhenEqual(Pred));
2409 
2410     // Fold comparisons for a non-escaping pointer even if the allocation call
2411     // cannot be elided. We cannot fold malloc comparison to null. Also, the
2412     // dynamic allocation call could be either of the operands.
2413     Value *MI = nullptr;
2414     if (isAllocLikeFn(LHS, TLI) &&
2415         llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2416       MI = LHS;
2417     else if (isAllocLikeFn(RHS, TLI) &&
2418              llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2419       MI = RHS;
2420     // FIXME: We should also fold the compare when the pointer escapes, but the
2421     // compare dominates the pointer escape.
2422     if (MI && !PointerMayBeCaptured(MI, true, true))
2423       return ConstantInt::get(GetCompareTy(LHS),
2424                               CmpInst::isFalseWhenEqual(Pred));
2425   }
2426 
2427   // Otherwise, fail.
2428   return nullptr;
2429 }
2430 
2431 /// Fold an icmp when its operands have i1 scalar type.
2432 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2433                                   Value *RHS, const SimplifyQuery &Q) {
2434   Type *ITy = GetCompareTy(LHS); // The return type.
2435   Type *OpTy = LHS->getType();   // The operand type.
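  // Note: for i1 operands, false (0) <u true (1) in the unsigned order, while
  // true (-1) <s false (0) in the signed order; the cases below rely on this.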
2436 if (!OpTy->isIntOrIntVectorTy(1)) 2437 return nullptr; 2438 2439 // A boolean compared to true/false can be simplified in 14 out of the 20 2440 // (10 predicates * 2 constants) possible combinations. Cases not handled here 2441 // require a 'not' of the LHS, so those must be transformed in InstCombine. 2442 if (match(RHS, m_Zero())) { 2443 switch (Pred) { 2444 case CmpInst::ICMP_NE: // X != 0 -> X 2445 case CmpInst::ICMP_UGT: // X >u 0 -> X 2446 case CmpInst::ICMP_SLT: // X <s 0 -> X 2447 return LHS; 2448 2449 case CmpInst::ICMP_ULT: // X <u 0 -> false 2450 case CmpInst::ICMP_SGT: // X >s 0 -> false 2451 return getFalse(ITy); 2452 2453 case CmpInst::ICMP_UGE: // X >=u 0 -> true 2454 case CmpInst::ICMP_SLE: // X <=s 0 -> true 2455 return getTrue(ITy); 2456 2457 default: break; 2458 } 2459 } else if (match(RHS, m_One())) { 2460 switch (Pred) { 2461 case CmpInst::ICMP_EQ: // X == 1 -> X 2462 case CmpInst::ICMP_UGE: // X >=u 1 -> X 2463 case CmpInst::ICMP_SLE: // X <=s -1 -> X 2464 return LHS; 2465 2466 case CmpInst::ICMP_UGT: // X >u 1 -> false 2467 case CmpInst::ICMP_SLT: // X <s -1 -> false 2468 return getFalse(ITy); 2469 2470 case CmpInst::ICMP_ULE: // X <=u 1 -> true 2471 case CmpInst::ICMP_SGE: // X >=s -1 -> true 2472 return getTrue(ITy); 2473 2474 default: break; 2475 } 2476 } 2477 2478 switch (Pred) { 2479 default: 2480 break; 2481 case ICmpInst::ICMP_UGE: 2482 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false)) 2483 return getTrue(ITy); 2484 break; 2485 case ICmpInst::ICMP_SGE: 2486 /// For signed comparison, the values for an i1 are 0 and -1 2487 /// respectively. This maps into a truth table of: 2488 /// LHS | RHS | LHS >=s RHS | LHS implies RHS 2489 /// 0 | 0 | 1 (0 >= 0) | 1 2490 /// 0 | 1 | 1 (0 >= -1) | 1 2491 /// 1 | 0 | 0 (-1 >= 0) | 0 2492 /// 1 | 1 | 1 (-1 >= -1) | 1 2493 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2494 return getTrue(ITy); 2495 break; 2496 case ICmpInst::ICMP_ULE: 2497 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2498 return getTrue(ITy); 2499 break; 2500 } 2501 2502 return nullptr; 2503 } 2504 2505 /// Try hard to fold icmp with zero RHS because this is a common case. 2506 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, 2507 Value *RHS, const SimplifyQuery &Q) { 2508 if (!match(RHS, m_Zero())) 2509 return nullptr; 2510 2511 Type *ITy = GetCompareTy(LHS); // The return type. 
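  // For example, icmp ult %x, 0 is always false and icmp uge %x, 0 is always
  // true; the equality and signed cases below additionally need known-bits or
  // known-non-zero facts about the LHS.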
2512 switch (Pred) { 2513 default: 2514 llvm_unreachable("Unknown ICmp predicate!"); 2515 case ICmpInst::ICMP_ULT: 2516 return getFalse(ITy); 2517 case ICmpInst::ICMP_UGE: 2518 return getTrue(ITy); 2519 case ICmpInst::ICMP_EQ: 2520 case ICmpInst::ICMP_ULE: 2521 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) 2522 return getFalse(ITy); 2523 break; 2524 case ICmpInst::ICMP_NE: 2525 case ICmpInst::ICMP_UGT: 2526 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) 2527 return getTrue(ITy); 2528 break; 2529 case ICmpInst::ICMP_SLT: { 2530 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2531 if (LHSKnown.isNegative()) 2532 return getTrue(ITy); 2533 if (LHSKnown.isNonNegative()) 2534 return getFalse(ITy); 2535 break; 2536 } 2537 case ICmpInst::ICMP_SLE: { 2538 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2539 if (LHSKnown.isNegative()) 2540 return getTrue(ITy); 2541 if (LHSKnown.isNonNegative() && 2542 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2543 return getFalse(ITy); 2544 break; 2545 } 2546 case ICmpInst::ICMP_SGE: { 2547 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2548 if (LHSKnown.isNegative()) 2549 return getFalse(ITy); 2550 if (LHSKnown.isNonNegative()) 2551 return getTrue(ITy); 2552 break; 2553 } 2554 case ICmpInst::ICMP_SGT: { 2555 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2556 if (LHSKnown.isNegative()) 2557 return getFalse(ITy); 2558 if (LHSKnown.isNonNegative() && 2559 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2560 return getTrue(ITy); 2561 break; 2562 } 2563 } 2564 2565 return nullptr; 2566 } 2567 2568 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS, 2569 Value *RHS, const InstrInfoQuery &IIQ) { 2570 Type *ITy = GetCompareTy(RHS); // The return type. 2571 2572 Value *X; 2573 // Sign-bit checks can be optimized to true/false after unsigned 2574 // floating-point casts: 2575 // icmp slt (bitcast (uitofp X)), 0 --> false 2576 // icmp sgt (bitcast (uitofp X)), -1 --> true 2577 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) { 2578 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero())) 2579 return ConstantInt::getFalse(ITy); 2580 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes())) 2581 return ConstantInt::getTrue(ITy); 2582 } 2583 2584 const APInt *C; 2585 if (!match(RHS, m_APInt(C))) 2586 return nullptr; 2587 2588 // Rule out tautological comparisons (eg., ult 0 or uge 0). 2589 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C); 2590 if (RHS_CR.isEmptySet()) 2591 return ConstantInt::getFalse(ITy); 2592 if (RHS_CR.isFullSet()) 2593 return ConstantInt::getTrue(ITy); 2594 2595 ConstantRange LHS_CR = computeConstantRange(LHS, IIQ.UseInstrInfo); 2596 if (!LHS_CR.isFullSet()) { 2597 if (RHS_CR.contains(LHS_CR)) 2598 return ConstantInt::getTrue(ITy); 2599 if (RHS_CR.inverse().contains(LHS_CR)) 2600 return ConstantInt::getFalse(ITy); 2601 } 2602 2603 return nullptr; 2604 } 2605 2606 /// TODO: A large part of this logic is duplicated in InstCombine's 2607 /// foldICmpBinOp(). We should be able to share that and avoid the code 2608 /// duplication. 2609 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS, 2610 Value *RHS, const SimplifyQuery &Q, 2611 unsigned MaxRecurse) { 2612 Type *ITy = GetCompareTy(LHS); // The return type. 
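  // For example, icmp ult (add nuw %x, %y), %x reduces below to
  // icmp ult %y, 0, which is always false.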
2613 2614 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS); 2615 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS); 2616 if (MaxRecurse && (LBO || RBO)) { 2617 // Analyze the case when either LHS or RHS is an add instruction. 2618 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 2619 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null). 2620 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false; 2621 if (LBO && LBO->getOpcode() == Instruction::Add) { 2622 A = LBO->getOperand(0); 2623 B = LBO->getOperand(1); 2624 NoLHSWrapProblem = 2625 ICmpInst::isEquality(Pred) || 2626 (CmpInst::isUnsigned(Pred) && 2627 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) || 2628 (CmpInst::isSigned(Pred) && 2629 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO))); 2630 } 2631 if (RBO && RBO->getOpcode() == Instruction::Add) { 2632 C = RBO->getOperand(0); 2633 D = RBO->getOperand(1); 2634 NoRHSWrapProblem = 2635 ICmpInst::isEquality(Pred) || 2636 (CmpInst::isUnsigned(Pred) && 2637 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) || 2638 (CmpInst::isSigned(Pred) && 2639 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO))); 2640 } 2641 2642 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 2643 if ((A == RHS || B == RHS) && NoLHSWrapProblem) 2644 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A, 2645 Constant::getNullValue(RHS->getType()), Q, 2646 MaxRecurse - 1)) 2647 return V; 2648 2649 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 2650 if ((C == LHS || D == LHS) && NoRHSWrapProblem) 2651 if (Value *V = 2652 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()), 2653 C == LHS ? D : C, Q, MaxRecurse - 1)) 2654 return V; 2655 2656 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow. 2657 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem && 2658 NoRHSWrapProblem) { 2659 // Determine Y and Z in the form icmp (X+Y), (X+Z). 2660 Value *Y, *Z; 2661 if (A == C) { 2662 // C + B == C + D -> B == D 2663 Y = B; 2664 Z = D; 2665 } else if (A == D) { 2666 // D + B == C + D -> B == C 2667 Y = B; 2668 Z = C; 2669 } else if (B == C) { 2670 // A + C == C + D -> A == D 2671 Y = A; 2672 Z = D; 2673 } else { 2674 assert(B == D); 2675 // A + D == C + D -> A == C 2676 Y = A; 2677 Z = C; 2678 } 2679 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1)) 2680 return V; 2681 } 2682 } 2683 2684 { 2685 Value *Y = nullptr; 2686 // icmp pred (or X, Y), X 2687 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) { 2688 if (Pred == ICmpInst::ICMP_ULT) 2689 return getFalse(ITy); 2690 if (Pred == ICmpInst::ICMP_UGE) 2691 return getTrue(ITy); 2692 2693 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) { 2694 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2695 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2696 if (RHSKnown.isNonNegative() && YKnown.isNegative()) 2697 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy); 2698 if (RHSKnown.isNegative() || YKnown.isNonNegative()) 2699 return Pred == ICmpInst::ICMP_SLT ? 
getFalse(ITy) : getTrue(ITy); 2700 } 2701 } 2702 // icmp pred X, (or X, Y) 2703 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) { 2704 if (Pred == ICmpInst::ICMP_ULE) 2705 return getTrue(ITy); 2706 if (Pred == ICmpInst::ICMP_UGT) 2707 return getFalse(ITy); 2708 2709 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) { 2710 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2711 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2712 if (LHSKnown.isNonNegative() && YKnown.isNegative()) 2713 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy); 2714 if (LHSKnown.isNegative() || YKnown.isNonNegative()) 2715 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy); 2716 } 2717 } 2718 } 2719 2720 // icmp pred (and X, Y), X 2721 if (LBO && match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) { 2722 if (Pred == ICmpInst::ICMP_UGT) 2723 return getFalse(ITy); 2724 if (Pred == ICmpInst::ICMP_ULE) 2725 return getTrue(ITy); 2726 } 2727 // icmp pred X, (and X, Y) 2728 if (RBO && match(RBO, m_c_And(m_Value(), m_Specific(LHS)))) { 2729 if (Pred == ICmpInst::ICMP_UGE) 2730 return getTrue(ITy); 2731 if (Pred == ICmpInst::ICMP_ULT) 2732 return getFalse(ITy); 2733 } 2734 2735 // 0 - (zext X) pred C 2736 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) { 2737 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) { 2738 if (RHSC->getValue().isStrictlyPositive()) { 2739 if (Pred == ICmpInst::ICMP_SLT) 2740 return ConstantInt::getTrue(RHSC->getContext()); 2741 if (Pred == ICmpInst::ICMP_SGE) 2742 return ConstantInt::getFalse(RHSC->getContext()); 2743 if (Pred == ICmpInst::ICMP_EQ) 2744 return ConstantInt::getFalse(RHSC->getContext()); 2745 if (Pred == ICmpInst::ICMP_NE) 2746 return ConstantInt::getTrue(RHSC->getContext()); 2747 } 2748 if (RHSC->getValue().isNonNegative()) { 2749 if (Pred == ICmpInst::ICMP_SLE) 2750 return ConstantInt::getTrue(RHSC->getContext()); 2751 if (Pred == ICmpInst::ICMP_SGT) 2752 return ConstantInt::getFalse(RHSC->getContext()); 2753 } 2754 } 2755 } 2756 2757 // icmp pred (urem X, Y), Y 2758 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) { 2759 switch (Pred) { 2760 default: 2761 break; 2762 case ICmpInst::ICMP_SGT: 2763 case ICmpInst::ICMP_SGE: { 2764 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2765 if (!Known.isNonNegative()) 2766 break; 2767 LLVM_FALLTHROUGH; 2768 } 2769 case ICmpInst::ICMP_EQ: 2770 case ICmpInst::ICMP_UGT: 2771 case ICmpInst::ICMP_UGE: 2772 return getFalse(ITy); 2773 case ICmpInst::ICMP_SLT: 2774 case ICmpInst::ICMP_SLE: { 2775 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2776 if (!Known.isNonNegative()) 2777 break; 2778 LLVM_FALLTHROUGH; 2779 } 2780 case ICmpInst::ICMP_NE: 2781 case ICmpInst::ICMP_ULT: 2782 case ICmpInst::ICMP_ULE: 2783 return getTrue(ITy); 2784 } 2785 } 2786 2787 // icmp pred X, (urem Y, X) 2788 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) { 2789 switch (Pred) { 2790 default: 2791 break; 2792 case ICmpInst::ICMP_SGT: 2793 case ICmpInst::ICMP_SGE: { 2794 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2795 if (!Known.isNonNegative()) 2796 break; 2797 LLVM_FALLTHROUGH; 2798 } 2799 case ICmpInst::ICMP_NE: 2800 case ICmpInst::ICMP_UGT: 2801 case ICmpInst::ICMP_UGE: 2802 return getTrue(ITy); 2803 case ICmpInst::ICMP_SLT: 2804 case ICmpInst::ICMP_SLE: { 2805 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2806 if (!Known.isNonNegative()) 
2807 break; 2808 LLVM_FALLTHROUGH; 2809 } 2810 case ICmpInst::ICMP_EQ: 2811 case ICmpInst::ICMP_ULT: 2812 case ICmpInst::ICMP_ULE: 2813 return getFalse(ITy); 2814 } 2815 } 2816 2817 // x >> y <=u x 2818 // x udiv y <=u x. 2819 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) || 2820 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) { 2821 // icmp pred (X op Y), X 2822 if (Pred == ICmpInst::ICMP_UGT) 2823 return getFalse(ITy); 2824 if (Pred == ICmpInst::ICMP_ULE) 2825 return getTrue(ITy); 2826 } 2827 2828 // x >=u x >> y 2829 // x >=u x udiv y. 2830 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) || 2831 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) { 2832 // icmp pred X, (X op Y) 2833 if (Pred == ICmpInst::ICMP_ULT) 2834 return getFalse(ITy); 2835 if (Pred == ICmpInst::ICMP_UGE) 2836 return getTrue(ITy); 2837 } 2838 2839 // handle: 2840 // CI2 << X == CI 2841 // CI2 << X != CI 2842 // 2843 // where CI2 is a power of 2 and CI isn't 2844 if (auto *CI = dyn_cast<ConstantInt>(RHS)) { 2845 const APInt *CI2Val, *CIVal = &CI->getValue(); 2846 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) && 2847 CI2Val->isPowerOf2()) { 2848 if (!CIVal->isPowerOf2()) { 2849 // CI2 << X can equal zero in some circumstances, 2850 // this simplification is unsafe if CI is zero. 2851 // 2852 // We know it is safe if: 2853 // - The shift is nsw, we can't shift out the one bit. 2854 // - The shift is nuw, we can't shift out the one bit. 2855 // - CI2 is one 2856 // - CI isn't zero 2857 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) || 2858 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) || 2859 CI2Val->isOneValue() || !CI->isZero()) { 2860 if (Pred == ICmpInst::ICMP_EQ) 2861 return ConstantInt::getFalse(RHS->getContext()); 2862 if (Pred == ICmpInst::ICMP_NE) 2863 return ConstantInt::getTrue(RHS->getContext()); 2864 } 2865 } 2866 if (CIVal->isSignMask() && CI2Val->isOneValue()) { 2867 if (Pred == ICmpInst::ICMP_UGT) 2868 return ConstantInt::getFalse(RHS->getContext()); 2869 if (Pred == ICmpInst::ICMP_ULE) 2870 return ConstantInt::getTrue(RHS->getContext()); 2871 } 2872 } 2873 } 2874 2875 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() && 2876 LBO->getOperand(1) == RBO->getOperand(1)) { 2877 switch (LBO->getOpcode()) { 2878 default: 2879 break; 2880 case Instruction::UDiv: 2881 case Instruction::LShr: 2882 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) || 2883 !Q.IIQ.isExact(RBO)) 2884 break; 2885 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2886 RBO->getOperand(0), Q, MaxRecurse - 1)) 2887 return V; 2888 break; 2889 case Instruction::SDiv: 2890 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) || 2891 !Q.IIQ.isExact(RBO)) 2892 break; 2893 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2894 RBO->getOperand(0), Q, MaxRecurse - 1)) 2895 return V; 2896 break; 2897 case Instruction::AShr: 2898 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO)) 2899 break; 2900 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2901 RBO->getOperand(0), Q, MaxRecurse - 1)) 2902 return V; 2903 break; 2904 case Instruction::Shl: { 2905 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO); 2906 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO); 2907 if (!NUW && !NSW) 2908 break; 2909 if (!NSW && ICmpInst::isSigned(Pred)) 2910 break; 2911 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2912 RBO->getOperand(0), Q, MaxRecurse - 1)) 2913 return V; 2914 break; 2915 } 2916 } 2917 } 
2918 return nullptr; 2919 } 2920 2921 /// Simplify integer comparisons where at least one operand of the compare 2922 /// matches an integer min/max idiom. 2923 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS, 2924 Value *RHS, const SimplifyQuery &Q, 2925 unsigned MaxRecurse) { 2926 Type *ITy = GetCompareTy(LHS); // The return type. 2927 Value *A, *B; 2928 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE; 2929 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B". 2930 2931 // Signed variants on "max(a,b)>=a -> true". 2932 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 2933 if (A != RHS) 2934 std::swap(A, B); // smax(A, B) pred A. 2935 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 2936 // We analyze this as smax(A, B) pred A. 2937 P = Pred; 2938 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) && 2939 (A == LHS || B == LHS)) { 2940 if (A != LHS) 2941 std::swap(A, B); // A pred smax(A, B). 2942 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 2943 // We analyze this as smax(A, B) swapped-pred A. 2944 P = CmpInst::getSwappedPredicate(Pred); 2945 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 2946 (A == RHS || B == RHS)) { 2947 if (A != RHS) 2948 std::swap(A, B); // smin(A, B) pred A. 2949 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 2950 // We analyze this as smax(-A, -B) swapped-pred -A. 2951 // Note that we do not need to actually form -A or -B thanks to EqP. 2952 P = CmpInst::getSwappedPredicate(Pred); 2953 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) && 2954 (A == LHS || B == LHS)) { 2955 if (A != LHS) 2956 std::swap(A, B); // A pred smin(A, B). 2957 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 2958 // We analyze this as smax(-A, -B) pred -A. 2959 // Note that we do not need to actually form -A or -B thanks to EqP. 2960 P = Pred; 2961 } 2962 if (P != CmpInst::BAD_ICMP_PREDICATE) { 2963 // Cases correspond to "max(A, B) p A". 2964 switch (P) { 2965 default: 2966 break; 2967 case CmpInst::ICMP_EQ: 2968 case CmpInst::ICMP_SLE: 2969 // Equivalent to "A EqP B". This may be the same as the condition tested 2970 // in the max/min; if so, we can just return that. 2971 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 2972 return V; 2973 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 2974 return V; 2975 // Otherwise, see if "A EqP B" simplifies. 2976 if (MaxRecurse) 2977 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 2978 return V; 2979 break; 2980 case CmpInst::ICMP_NE: 2981 case CmpInst::ICMP_SGT: { 2982 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 2983 // Equivalent to "A InvEqP B". This may be the same as the condition 2984 // tested in the max/min; if so, we can just return that. 2985 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 2986 return V; 2987 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 2988 return V; 2989 // Otherwise, see if "A InvEqP B" simplifies. 2990 if (MaxRecurse) 2991 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 2992 return V; 2993 break; 2994 } 2995 case CmpInst::ICMP_SGE: 2996 // Always true. 2997 return getTrue(ITy); 2998 case CmpInst::ICMP_SLT: 2999 // Always false. 3000 return getFalse(ITy); 3001 } 3002 } 3003 3004 // Unsigned variants on "max(a,b)>=a -> true". 
3005 P = CmpInst::BAD_ICMP_PREDICATE; 3006 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 3007 if (A != RHS) 3008 std::swap(A, B); // umax(A, B) pred A. 3009 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 3010 // We analyze this as umax(A, B) pred A. 3011 P = Pred; 3012 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) && 3013 (A == LHS || B == LHS)) { 3014 if (A != LHS) 3015 std::swap(A, B); // A pred umax(A, B). 3016 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 3017 // We analyze this as umax(A, B) swapped-pred A. 3018 P = CmpInst::getSwappedPredicate(Pred); 3019 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 3020 (A == RHS || B == RHS)) { 3021 if (A != RHS) 3022 std::swap(A, B); // umin(A, B) pred A. 3023 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 3024 // We analyze this as umax(-A, -B) swapped-pred -A. 3025 // Note that we do not need to actually form -A or -B thanks to EqP. 3026 P = CmpInst::getSwappedPredicate(Pred); 3027 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) && 3028 (A == LHS || B == LHS)) { 3029 if (A != LHS) 3030 std::swap(A, B); // A pred umin(A, B). 3031 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 3032 // We analyze this as umax(-A, -B) pred -A. 3033 // Note that we do not need to actually form -A or -B thanks to EqP. 3034 P = Pred; 3035 } 3036 if (P != CmpInst::BAD_ICMP_PREDICATE) { 3037 // Cases correspond to "max(A, B) p A". 3038 switch (P) { 3039 default: 3040 break; 3041 case CmpInst::ICMP_EQ: 3042 case CmpInst::ICMP_ULE: 3043 // Equivalent to "A EqP B". This may be the same as the condition tested 3044 // in the max/min; if so, we can just return that. 3045 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 3046 return V; 3047 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 3048 return V; 3049 // Otherwise, see if "A EqP B" simplifies. 3050 if (MaxRecurse) 3051 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 3052 return V; 3053 break; 3054 case CmpInst::ICMP_NE: 3055 case CmpInst::ICMP_UGT: { 3056 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 3057 // Equivalent to "A InvEqP B". This may be the same as the condition 3058 // tested in the max/min; if so, we can just return that. 3059 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 3060 return V; 3061 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 3062 return V; 3063 // Otherwise, see if "A InvEqP B" simplifies. 3064 if (MaxRecurse) 3065 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 3066 return V; 3067 break; 3068 } 3069 case CmpInst::ICMP_UGE: 3070 // Always true. 3071 return getTrue(ITy); 3072 case CmpInst::ICMP_ULT: 3073 // Always false. 3074 return getFalse(ITy); 3075 } 3076 } 3077 3078 // Variants on "max(x,y) >= min(x,z)". 3079 Value *C, *D; 3080 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && 3081 match(RHS, m_SMin(m_Value(C), m_Value(D))) && 3082 (A == C || A == D || B == C || B == D)) { 3083 // max(x, ?) pred min(x, ?). 3084 if (Pred == CmpInst::ICMP_SGE) 3085 // Always true. 3086 return getTrue(ITy); 3087 if (Pred == CmpInst::ICMP_SLT) 3088 // Always false. 3089 return getFalse(ITy); 3090 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 3091 match(RHS, m_SMax(m_Value(C), m_Value(D))) && 3092 (A == C || A == D || B == C || B == D)) { 3093 // min(x, ?) pred max(x, ?). 3094 if (Pred == CmpInst::ICMP_SLE) 3095 // Always true. 
3096 return getTrue(ITy); 3097 if (Pred == CmpInst::ICMP_SGT) 3098 // Always false. 3099 return getFalse(ITy); 3100 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && 3101 match(RHS, m_UMin(m_Value(C), m_Value(D))) && 3102 (A == C || A == D || B == C || B == D)) { 3103 // max(x, ?) pred min(x, ?). 3104 if (Pred == CmpInst::ICMP_UGE) 3105 // Always true. 3106 return getTrue(ITy); 3107 if (Pred == CmpInst::ICMP_ULT) 3108 // Always false. 3109 return getFalse(ITy); 3110 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 3111 match(RHS, m_UMax(m_Value(C), m_Value(D))) && 3112 (A == C || A == D || B == C || B == D)) { 3113 // min(x, ?) pred max(x, ?). 3114 if (Pred == CmpInst::ICMP_ULE) 3115 // Always true. 3116 return getTrue(ITy); 3117 if (Pred == CmpInst::ICMP_UGT) 3118 // Always false. 3119 return getFalse(ITy); 3120 } 3121 3122 return nullptr; 3123 } 3124 3125 /// Given operands for an ICmpInst, see if we can fold the result. 3126 /// If not, this returns null. 3127 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3128 const SimplifyQuery &Q, unsigned MaxRecurse) { 3129 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3130 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!"); 3131 3132 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3133 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3134 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3135 3136 // If we have a constant, make sure it is on the RHS. 3137 std::swap(LHS, RHS); 3138 Pred = CmpInst::getSwappedPredicate(Pred); 3139 } 3140 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X"); 3141 3142 Type *ITy = GetCompareTy(LHS); // The return type. 3143 3144 // For EQ and NE, we can always pick a value for the undef to make the 3145 // predicate pass or fail, so we can return undef. 3146 // Matches behavior in llvm::ConstantFoldCompareInstruction. 3147 if (isa<UndefValue>(RHS) && ICmpInst::isEquality(Pred)) 3148 return UndefValue::get(ITy); 3149 3150 // icmp X, X -> true/false 3151 // icmp X, undef -> true/false because undef could be X. 3152 if (LHS == RHS || isa<UndefValue>(RHS)) 3153 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred)); 3154 3155 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q)) 3156 return V; 3157 3158 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q)) 3159 return V; 3160 3161 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ)) 3162 return V; 3163 3164 // If both operands have range metadata, use the metadata 3165 // to simplify the comparison. 
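  // For example, if the LHS has !range [0, 8) and the RHS has !range [8, 16),
  // then "icmp ult LHS, RHS" folds to true.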
3166 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) { 3167 auto RHS_Instr = cast<Instruction>(RHS); 3168 auto LHS_Instr = cast<Instruction>(LHS); 3169 3170 if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) && 3171 Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) { 3172 auto RHS_CR = getConstantRangeFromMetadata( 3173 *RHS_Instr->getMetadata(LLVMContext::MD_range)); 3174 auto LHS_CR = getConstantRangeFromMetadata( 3175 *LHS_Instr->getMetadata(LLVMContext::MD_range)); 3176 3177 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR); 3178 if (Satisfied_CR.contains(LHS_CR)) 3179 return ConstantInt::getTrue(RHS->getContext()); 3180 3181 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion( 3182 CmpInst::getInversePredicate(Pred), RHS_CR); 3183 if (InversedSatisfied_CR.contains(LHS_CR)) 3184 return ConstantInt::getFalse(RHS->getContext()); 3185 } 3186 } 3187 3188 // Compare of cast, for example (zext X) != 0 -> X != 0 3189 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) { 3190 Instruction *LI = cast<CastInst>(LHS); 3191 Value *SrcOp = LI->getOperand(0); 3192 Type *SrcTy = SrcOp->getType(); 3193 Type *DstTy = LI->getType(); 3194 3195 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input 3196 // if the integer type is the same size as the pointer type. 3197 if (MaxRecurse && isa<PtrToIntInst>(LI) && 3198 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) { 3199 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 3200 // Transfer the cast to the constant. 3201 if (Value *V = SimplifyICmpInst(Pred, SrcOp, 3202 ConstantExpr::getIntToPtr(RHSC, SrcTy), 3203 Q, MaxRecurse-1)) 3204 return V; 3205 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) { 3206 if (RI->getOperand(0)->getType() == SrcTy) 3207 // Compare without the cast. 3208 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0), 3209 Q, MaxRecurse-1)) 3210 return V; 3211 } 3212 } 3213 3214 if (isa<ZExtInst>(LHS)) { 3215 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the 3216 // same type. 3217 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) { 3218 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType()) 3219 // Compare X and Y. Note that signed predicates become unsigned. 3220 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 3221 SrcOp, RI->getOperand(0), Q, 3222 MaxRecurse-1)) 3223 return V; 3224 } 3225 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended 3226 // too. If not, then try to deduce the result of the comparison. 3227 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 3228 // Compute the constant that would happen if we truncated to SrcTy then 3229 // reextended to DstTy. 3230 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy); 3231 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy); 3232 3233 // If the re-extended constant didn't change then this is effectively 3234 // also a case of comparing two zero-extended values. 3235 if (RExt == CI && MaxRecurse) 3236 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 3237 SrcOp, Trunc, Q, MaxRecurse-1)) 3238 return V; 3239 3240 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit 3241 // there. Use this to work out the result of the comparison. 3242 if (RExt != CI) { 3243 switch (Pred) { 3244 default: llvm_unreachable("Unknown ICmp predicate!"); 3245 // LHS <u RHS. 
3246 case ICmpInst::ICMP_EQ:
3247 case ICmpInst::ICMP_UGT:
3248 case ICmpInst::ICMP_UGE:
3249 return ConstantInt::getFalse(CI->getContext());
3250
3251 case ICmpInst::ICMP_NE:
3252 case ICmpInst::ICMP_ULT:
3253 case ICmpInst::ICMP_ULE:
3254 return ConstantInt::getTrue(CI->getContext());
3255
3256 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3257 // is non-negative then LHS <s RHS.
3258 case ICmpInst::ICMP_SGT:
3259 case ICmpInst::ICMP_SGE:
3260 return CI->getValue().isNegative() ?
3261 ConstantInt::getTrue(CI->getContext()) :
3262 ConstantInt::getFalse(CI->getContext());
3263
3264 case ICmpInst::ICMP_SLT:
3265 case ICmpInst::ICMP_SLE:
3266 return CI->getValue().isNegative() ?
3267 ConstantInt::getFalse(CI->getContext()) :
3268 ConstantInt::getTrue(CI->getContext());
3269 }
3270 }
3271 }
3272 }
3273
3274 if (isa<SExtInst>(LHS)) {
3275 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3276 // same type.
3277 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3278 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3279 // Compare X and Y. Note that the predicate does not change.
3280 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3281 Q, MaxRecurse-1))
3282 return V;
3283 }
3284 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3285 // too. If not, then try to deduce the result of the comparison.
3286 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3287 // Compute the constant that would happen if we truncated to SrcTy then
3288 // reextended to DstTy.
3289 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3290 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3291
3292 // If the re-extended constant didn't change then this is effectively
3293 // also a case of comparing two sign-extended values.
3294 if (RExt == CI && MaxRecurse)
3295 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3296 return V;
3297
3298 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3299 // bits there. Use this to work out the result of the comparison.
3300 if (RExt != CI) {
3301 switch (Pred) {
3302 default: llvm_unreachable("Unknown ICmp predicate!");
3303 case ICmpInst::ICMP_EQ:
3304 return ConstantInt::getFalse(CI->getContext());
3305 case ICmpInst::ICMP_NE:
3306 return ConstantInt::getTrue(CI->getContext());
3307
3308 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
3309 // LHS >s RHS.
3310 case ICmpInst::ICMP_SGT:
3311 case ICmpInst::ICMP_SGE:
3312 return CI->getValue().isNegative() ?
3313 ConstantInt::getTrue(CI->getContext()) :
3314 ConstantInt::getFalse(CI->getContext());
3315 case ICmpInst::ICMP_SLT:
3316 case ICmpInst::ICMP_SLE:
3317 return CI->getValue().isNegative() ?
3318 ConstantInt::getFalse(CI->getContext()) :
3319 ConstantInt::getTrue(CI->getContext());
3320
3321 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
3322 // LHS >u RHS.
3323 case ICmpInst::ICMP_UGT:
3324 case ICmpInst::ICMP_UGE:
3325 // Comparison is true iff the LHS <s 0.
3326 if (MaxRecurse)
3327 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3328 Constant::getNullValue(SrcTy),
3329 Q, MaxRecurse-1))
3330 return V;
3331 break;
3332 case ICmpInst::ICMP_ULT:
3333 case ICmpInst::ICMP_ULE:
3334 // Comparison is true iff the LHS >=s 0.
3335 if (MaxRecurse) 3336 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp, 3337 Constant::getNullValue(SrcTy), 3338 Q, MaxRecurse-1)) 3339 return V; 3340 break; 3341 } 3342 } 3343 } 3344 } 3345 } 3346 3347 // icmp eq|ne X, Y -> false|true if X != Y 3348 if (ICmpInst::isEquality(Pred) && 3349 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) { 3350 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy); 3351 } 3352 3353 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse)) 3354 return V; 3355 3356 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse)) 3357 return V; 3358 3359 // Simplify comparisons of related pointers using a powerful, recursive 3360 // GEP-walk when we have target data available.. 3361 if (LHS->getType()->isPointerTy()) 3362 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, 3363 Q.IIQ, LHS, RHS)) 3364 return C; 3365 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS)) 3366 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS)) 3367 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) == 3368 Q.DL.getTypeSizeInBits(CLHS->getType()) && 3369 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) == 3370 Q.DL.getTypeSizeInBits(CRHS->getType())) 3371 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, 3372 Q.IIQ, CLHS->getPointerOperand(), 3373 CRHS->getPointerOperand())) 3374 return C; 3375 3376 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) { 3377 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) { 3378 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() && 3379 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() && 3380 (ICmpInst::isEquality(Pred) || 3381 (GLHS->isInBounds() && GRHS->isInBounds() && 3382 Pred == ICmpInst::getSignedPredicate(Pred)))) { 3383 // The bases are equal and the indices are constant. Build a constant 3384 // expression GEP with the same indices and a null base pointer to see 3385 // what constant folding can make out of it. 3386 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType()); 3387 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end()); 3388 Constant *NewLHS = ConstantExpr::getGetElementPtr( 3389 GLHS->getSourceElementType(), Null, IndicesLHS); 3390 3391 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end()); 3392 Constant *NewRHS = ConstantExpr::getGetElementPtr( 3393 GLHS->getSourceElementType(), Null, IndicesRHS); 3394 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS); 3395 } 3396 } 3397 } 3398 3399 // If the comparison is with the result of a select instruction, check whether 3400 // comparing with either branch of the select always yields the same value. 3401 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3402 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3403 return V; 3404 3405 // If the comparison is with the result of a phi instruction, check whether 3406 // doing the compare with each incoming phi value yields a common result. 3407 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 3408 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 3409 return V; 3410 3411 return nullptr; 3412 } 3413 3414 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3415 const SimplifyQuery &Q) { 3416 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit); 3417 } 3418 3419 /// Given operands for an FCmpInst, see if we can fold the result. 3420 /// If not, this returns null. 
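/// For example, "fcmp ord X, Y" folds to true and "fcmp uno X, Y" folds to
/// false when neither X nor Y can be NaN.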
3421 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3422 FastMathFlags FMF, const SimplifyQuery &Q, 3423 unsigned MaxRecurse) { 3424 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3425 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!"); 3426 3427 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3428 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3429 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3430 3431 // If we have a constant, make sure it is on the RHS. 3432 std::swap(LHS, RHS); 3433 Pred = CmpInst::getSwappedPredicate(Pred); 3434 } 3435 3436 // Fold trivial predicates. 3437 Type *RetTy = GetCompareTy(LHS); 3438 if (Pred == FCmpInst::FCMP_FALSE) 3439 return getFalse(RetTy); 3440 if (Pred == FCmpInst::FCMP_TRUE) 3441 return getTrue(RetTy); 3442 3443 // Fold (un)ordered comparison if we can determine there are no NaNs. 3444 if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD) 3445 if (FMF.noNaNs() || 3446 (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI))) 3447 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD); 3448 3449 // NaN is unordered; NaN is not ordered. 3450 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) && 3451 "Comparison must be either ordered or unordered"); 3452 if (match(RHS, m_NaN())) 3453 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred)); 3454 3455 // fcmp pred x, undef and fcmp pred undef, x 3456 // fold to true if unordered, false if ordered 3457 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) { 3458 // Choosing NaN for the undef will always make unordered comparison succeed 3459 // and ordered comparison fail. 3460 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred)); 3461 } 3462 3463 // fcmp x,x -> true/false. Not all compares are foldable. 3464 if (LHS == RHS) { 3465 if (CmpInst::isTrueWhenEqual(Pred)) 3466 return getTrue(RetTy); 3467 if (CmpInst::isFalseWhenEqual(Pred)) 3468 return getFalse(RetTy); 3469 } 3470 3471 // Handle fcmp with constant RHS. 3472 // TODO: Use match with a specific FP value, so these work with vectors with 3473 // undef lanes. 3474 const APFloat *C; 3475 if (match(RHS, m_APFloat(C))) { 3476 // Check whether the constant is an infinity. 3477 if (C->isInfinity()) { 3478 if (C->isNegative()) { 3479 switch (Pred) { 3480 case FCmpInst::FCMP_OLT: 3481 // No value is ordered and less than negative infinity. 3482 return getFalse(RetTy); 3483 case FCmpInst::FCMP_UGE: 3484 // All values are unordered with or at least negative infinity. 3485 return getTrue(RetTy); 3486 default: 3487 break; 3488 } 3489 } else { 3490 switch (Pred) { 3491 case FCmpInst::FCMP_OGT: 3492 // No value is ordered and greater than infinity. 3493 return getFalse(RetTy); 3494 case FCmpInst::FCMP_ULE: 3495 // All values are unordered with and at most infinity. 3496 return getTrue(RetTy); 3497 default: 3498 break; 3499 } 3500 } 3501 } 3502 if (C->isNegative() && !C->isNegZero()) { 3503 assert(!C->isNaN() && "Unexpected NaN constant!"); 3504 // TODO: We can catch more cases by using a range check rather than 3505 // relying on CannotBeOrderedLessThanZero. 
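    // For example, with C = -1.0: if X cannot be ordered-less-than-zero,
    // "fcmp ugt X, -1.0" folds to true and "fcmp olt X, -1.0" folds to false.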
3506 switch (Pred) { 3507 case FCmpInst::FCMP_UGE: 3508 case FCmpInst::FCMP_UGT: 3509 case FCmpInst::FCMP_UNE: 3510 // (X >= 0) implies (X > C) when (C < 0) 3511 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3512 return getTrue(RetTy); 3513 break; 3514 case FCmpInst::FCMP_OEQ: 3515 case FCmpInst::FCMP_OLE: 3516 case FCmpInst::FCMP_OLT: 3517 // (X >= 0) implies !(X < C) when (C < 0) 3518 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3519 return getFalse(RetTy); 3520 break; 3521 default: 3522 break; 3523 } 3524 } 3525 3526 // Check comparison of [minnum/maxnum with constant] with other constant. 3527 const APFloat *C2; 3528 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) && 3529 C2->compare(*C) == APFloat::cmpLessThan) || 3530 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) && 3531 C2->compare(*C) == APFloat::cmpGreaterThan)) { 3532 bool IsMaxNum = 3533 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum; 3534 // The ordered relationship and minnum/maxnum guarantee that we do not 3535 // have NaN constants, so ordered/unordered preds are handled the same. 3536 switch (Pred) { 3537 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_UEQ: 3538 // minnum(X, LesserC) == C --> false 3539 // maxnum(X, GreaterC) == C --> false 3540 return getFalse(RetTy); 3541 case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_UNE: 3542 // minnum(X, LesserC) != C --> true 3543 // maxnum(X, GreaterC) != C --> true 3544 return getTrue(RetTy); 3545 case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_UGE: 3546 case FCmpInst::FCMP_OGT: case FCmpInst::FCMP_UGT: 3547 // minnum(X, LesserC) >= C --> false 3548 // minnum(X, LesserC) > C --> false 3549 // maxnum(X, GreaterC) >= C --> true 3550 // maxnum(X, GreaterC) > C --> true 3551 return ConstantInt::get(RetTy, IsMaxNum); 3552 case FCmpInst::FCMP_OLE: case FCmpInst::FCMP_ULE: 3553 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_ULT: 3554 // minnum(X, LesserC) <= C --> true 3555 // minnum(X, LesserC) < C --> true 3556 // maxnum(X, GreaterC) <= C --> false 3557 // maxnum(X, GreaterC) < C --> false 3558 return ConstantInt::get(RetTy, !IsMaxNum); 3559 default: 3560 // TRUE/FALSE/ORD/UNO should be handled before this. 3561 llvm_unreachable("Unexpected fcmp predicate"); 3562 } 3563 } 3564 } 3565 3566 if (match(RHS, m_AnyZeroFP())) { 3567 switch (Pred) { 3568 case FCmpInst::FCMP_OGE: 3569 case FCmpInst::FCMP_ULT: 3570 // Positive or zero X >= 0.0 --> true 3571 // Positive or zero X < 0.0 --> false 3572 if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) && 3573 CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3574 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy); 3575 break; 3576 case FCmpInst::FCMP_UGE: 3577 case FCmpInst::FCMP_OLT: 3578 // Positive or zero or nan X >= 0.0 --> true 3579 // Positive or zero or nan X < 0.0 --> false 3580 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3581 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy); 3582 break; 3583 default: 3584 break; 3585 } 3586 } 3587 3588 // If the comparison is with the result of a select instruction, check whether 3589 // comparing with either branch of the select always yields the same value. 3590 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3591 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3592 return V; 3593 3594 // If the comparison is with the result of a phi instruction, check whether 3595 // doing the compare with each incoming phi value yields a common result. 
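  // For example, "fcmp olt (phi [1.0, 2.0]), 4.0" yields true for every
  // incoming value, so the compare folds to true.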
3596 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 3597 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 3598 return V; 3599 3600 return nullptr; 3601 } 3602 3603 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3604 FastMathFlags FMF, const SimplifyQuery &Q) { 3605 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit); 3606 } 3607 3608 /// See if V simplifies when its operand Op is replaced with RepOp. 3609 static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, 3610 const SimplifyQuery &Q, 3611 unsigned MaxRecurse) { 3612 // Trivial replacement. 3613 if (V == Op) 3614 return RepOp; 3615 3616 // We cannot replace a constant, and shouldn't even try. 3617 if (isa<Constant>(Op)) 3618 return nullptr; 3619 3620 auto *I = dyn_cast<Instruction>(V); 3621 if (!I) 3622 return nullptr; 3623 3624 // If this is a binary operator, try to simplify it with the replaced op. 3625 if (auto *B = dyn_cast<BinaryOperator>(I)) { 3626 // Consider: 3627 // %cmp = icmp eq i32 %x, 2147483647 3628 // %add = add nsw i32 %x, 1 3629 // %sel = select i1 %cmp, i32 -2147483648, i32 %add 3630 // 3631 // We can't replace %sel with %add unless we strip away the flags. 3632 // TODO: This is an unusual limitation because better analysis results in 3633 // worse simplification. InstCombine can do this fold more generally 3634 // by dropping the flags. Remove this fold to save compile-time? 3635 if (isa<OverflowingBinaryOperator>(B)) 3636 if (Q.IIQ.hasNoSignedWrap(B) || Q.IIQ.hasNoUnsignedWrap(B)) 3637 return nullptr; 3638 if (isa<PossiblyExactOperator>(B) && Q.IIQ.isExact(B)) 3639 return nullptr; 3640 3641 if (MaxRecurse) { 3642 if (B->getOperand(0) == Op) 3643 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q, 3644 MaxRecurse - 1); 3645 if (B->getOperand(1) == Op) 3646 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q, 3647 MaxRecurse - 1); 3648 } 3649 } 3650 3651 // Same for CmpInsts. 3652 if (CmpInst *C = dyn_cast<CmpInst>(I)) { 3653 if (MaxRecurse) { 3654 if (C->getOperand(0) == Op) 3655 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q, 3656 MaxRecurse - 1); 3657 if (C->getOperand(1) == Op) 3658 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q, 3659 MaxRecurse - 1); 3660 } 3661 } 3662 3663 // Same for GEPs. 3664 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 3665 if (MaxRecurse) { 3666 SmallVector<Value *, 8> NewOps(GEP->getNumOperands()); 3667 transform(GEP->operands(), NewOps.begin(), 3668 [&](Value *V) { return V == Op ? RepOp : V; }); 3669 return SimplifyGEPInst(GEP->getSourceElementType(), NewOps, Q, 3670 MaxRecurse - 1); 3671 } 3672 } 3673 3674 // TODO: We could hand off more cases to instsimplify here. 3675 3676 // If all operands are constant after substituting Op for RepOp then we can 3677 // constant fold the instruction. 3678 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) { 3679 // Build a list of all constant operands. 3680 SmallVector<Constant *, 8> ConstOps; 3681 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 3682 if (I->getOperand(i) == Op) 3683 ConstOps.push_back(CRepOp); 3684 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i))) 3685 ConstOps.push_back(COp); 3686 else 3687 break; 3688 } 3689 3690 // All operands were constants, fold it. 
3691 if (ConstOps.size() == I->getNumOperands()) { 3692 if (CmpInst *C = dyn_cast<CmpInst>(I)) 3693 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0], 3694 ConstOps[1], Q.DL, Q.TLI); 3695 3696 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 3697 if (!LI->isVolatile()) 3698 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL); 3699 3700 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI); 3701 } 3702 } 3703 3704 return nullptr; 3705 } 3706 3707 /// Try to simplify a select instruction when its condition operand is an 3708 /// integer comparison where one operand of the compare is a constant. 3709 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, 3710 const APInt *Y, bool TrueWhenUnset) { 3711 const APInt *C; 3712 3713 // (X & Y) == 0 ? X & ~Y : X --> X 3714 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y 3715 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) && 3716 *Y == ~*C) 3717 return TrueWhenUnset ? FalseVal : TrueVal; 3718 3719 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y 3720 // (X & Y) != 0 ? X : X & ~Y --> X 3721 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) && 3722 *Y == ~*C) 3723 return TrueWhenUnset ? FalseVal : TrueVal; 3724 3725 if (Y->isPowerOf2()) { 3726 // (X & Y) == 0 ? X | Y : X --> X | Y 3727 // (X & Y) != 0 ? X | Y : X --> X 3728 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) && 3729 *Y == *C) 3730 return TrueWhenUnset ? TrueVal : FalseVal; 3731 3732 // (X & Y) == 0 ? X : X | Y --> X 3733 // (X & Y) != 0 ? X : X | Y --> X | Y 3734 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) && 3735 *Y == *C) 3736 return TrueWhenUnset ? TrueVal : FalseVal; 3737 } 3738 3739 return nullptr; 3740 } 3741 3742 /// An alternative way to test if a bit is set or not uses sgt/slt instead of 3743 /// eq/ne. 3744 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, 3745 ICmpInst::Predicate Pred, 3746 Value *TrueVal, Value *FalseVal) { 3747 Value *X; 3748 APInt Mask; 3749 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask)) 3750 return nullptr; 3751 3752 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask, 3753 Pred == ICmpInst::ICMP_EQ); 3754 } 3755 3756 /// Try to simplify a select instruction when its condition operand is an 3757 /// integer comparison. 3758 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, 3759 Value *FalseVal, const SimplifyQuery &Q, 3760 unsigned MaxRecurse) { 3761 ICmpInst::Predicate Pred; 3762 Value *CmpLHS, *CmpRHS; 3763 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)))) 3764 return nullptr; 3765 3766 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) { 3767 Value *X; 3768 const APInt *Y; 3769 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y)))) 3770 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y, 3771 Pred == ICmpInst::ICMP_EQ)) 3772 return V; 3773 3774 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate. 3775 Value *ShAmt; 3776 auto isFsh = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), 3777 m_Value(ShAmt)), 3778 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), 3779 m_Value(ShAmt))); 3780 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X 3781 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X 3782 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt && 3783 Pred == ICmpInst::ICMP_EQ) 3784 return X; 3785 // (ShAmt != 0) ? X : fshl(X, *, ShAmt) --> X 3786 // (ShAmt != 0) ? 
X : fshr(*, X, ShAmt) --> X 3787 if (match(FalseVal, isFsh) && TrueVal == X && CmpLHS == ShAmt && 3788 Pred == ICmpInst::ICMP_NE) 3789 return X; 3790 3791 // Test for a zero-shift-guard-op around rotates. These are used to 3792 // avoid UB from oversized shifts in raw IR rotate patterns, but the 3793 // intrinsics do not have that problem. 3794 // We do not allow this transform for the general funnel shift case because 3795 // that would not preserve the poison safety of the original code. 3796 auto isRotate = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), 3797 m_Deferred(X), 3798 m_Value(ShAmt)), 3799 m_Intrinsic<Intrinsic::fshr>(m_Value(X), 3800 m_Deferred(X), 3801 m_Value(ShAmt))); 3802 // (ShAmt != 0) ? fshl(X, X, ShAmt) : X --> fshl(X, X, ShAmt) 3803 // (ShAmt != 0) ? fshr(X, X, ShAmt) : X --> fshr(X, X, ShAmt) 3804 if (match(TrueVal, isRotate) && FalseVal == X && CmpLHS == ShAmt && 3805 Pred == ICmpInst::ICMP_NE) 3806 return TrueVal; 3807 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt) 3808 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt) 3809 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt && 3810 Pred == ICmpInst::ICMP_EQ) 3811 return FalseVal; 3812 } 3813 3814 // Check for other compares that behave like bit test. 3815 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, 3816 TrueVal, FalseVal)) 3817 return V; 3818 3819 // If we have an equality comparison, then we know the value in one of the 3820 // arms of the select. See if substituting this value into the arm and 3821 // simplifying the result yields the same value as the other arm. 3822 if (Pred == ICmpInst::ICMP_EQ) { 3823 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3824 TrueVal || 3825 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3826 TrueVal) 3827 return FalseVal; 3828 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3829 FalseVal || 3830 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3831 FalseVal) 3832 return FalseVal; 3833 } else if (Pred == ICmpInst::ICMP_NE) { 3834 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3835 FalseVal || 3836 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3837 FalseVal) 3838 return TrueVal; 3839 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3840 TrueVal || 3841 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3842 TrueVal) 3843 return TrueVal; 3844 } 3845 3846 return nullptr; 3847 } 3848 3849 /// Try to simplify a select instruction when its condition operand is a 3850 /// floating-point comparison. 3851 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F) { 3852 FCmpInst::Predicate Pred; 3853 if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) && 3854 !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T)))) 3855 return nullptr; 3856 3857 // TODO: The transform may not be valid with -0.0. An incomplete way of 3858 // testing for that possibility is to check if at least one operand is a 3859 // non-zero constant. 3860 const APFloat *C; 3861 if ((match(T, m_APFloat(C)) && C->isNonZero()) || 3862 (match(F, m_APFloat(C)) && C->isNonZero())) { 3863 // (T == F) ? T : F --> F 3864 // (F == T) ? T : F --> F 3865 if (Pred == FCmpInst::FCMP_OEQ) 3866 return F; 3867 3868 // (T != F) ? T : F --> T 3869 // (F != T) ? 
T : F --> T 3870 if (Pred == FCmpInst::FCMP_UNE) 3871 return T; 3872 } 3873 3874 return nullptr; 3875 } 3876 3877 /// Given operands for a SelectInst, see if we can fold the result. 3878 /// If not, this returns null. 3879 static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, 3880 const SimplifyQuery &Q, unsigned MaxRecurse) { 3881 if (auto *CondC = dyn_cast<Constant>(Cond)) { 3882 if (auto *TrueC = dyn_cast<Constant>(TrueVal)) 3883 if (auto *FalseC = dyn_cast<Constant>(FalseVal)) 3884 return ConstantFoldSelectInstruction(CondC, TrueC, FalseC); 3885 3886 // select undef, X, Y -> X or Y 3887 if (isa<UndefValue>(CondC)) 3888 return isa<Constant>(FalseVal) ? FalseVal : TrueVal; 3889 3890 // TODO: Vector constants with undef elements don't simplify. 3891 3892 // select true, X, Y -> X 3893 if (CondC->isAllOnesValue()) 3894 return TrueVal; 3895 // select false, X, Y -> Y 3896 if (CondC->isNullValue()) 3897 return FalseVal; 3898 } 3899 3900 // select ?, X, X -> X 3901 if (TrueVal == FalseVal) 3902 return TrueVal; 3903 3904 if (isa<UndefValue>(TrueVal)) // select ?, undef, X -> X 3905 return FalseVal; 3906 if (isa<UndefValue>(FalseVal)) // select ?, X, undef -> X 3907 return TrueVal; 3908 3909 if (Value *V = 3910 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse)) 3911 return V; 3912 3913 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal)) 3914 return V; 3915 3916 if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal)) 3917 return V; 3918 3919 Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL); 3920 if (Imp) 3921 return *Imp ? TrueVal : FalseVal; 3922 3923 return nullptr; 3924 } 3925 3926 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, 3927 const SimplifyQuery &Q) { 3928 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit); 3929 } 3930 3931 /// Given operands for an GetElementPtrInst, see if we can fold the result. 3932 /// If not, this returns null. 3933 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 3934 const SimplifyQuery &Q, unsigned) { 3935 // The type of the GEP pointer operand. 3936 unsigned AS = 3937 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace(); 3938 3939 // getelementptr P -> P. 3940 if (Ops.size() == 1) 3941 return Ops[0]; 3942 3943 // Compute the (pointer) type returned by the GEP instruction. 3944 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1)); 3945 Type *GEPTy = PointerType::get(LastType, AS); 3946 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType())) 3947 GEPTy = VectorType::get(GEPTy, VT->getNumElements()); 3948 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType())) 3949 GEPTy = VectorType::get(GEPTy, VT->getNumElements()); 3950 3951 if (isa<UndefValue>(Ops[0])) 3952 return UndefValue::get(GEPTy); 3953 3954 if (Ops.size() == 2) { 3955 // getelementptr P, 0 -> P. 3956 if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy) 3957 return Ops[0]; 3958 3959 Type *Ty = SrcTy; 3960 if (Ty->isSized()) { 3961 Value *P; 3962 uint64_t C; 3963 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty); 3964 // getelementptr P, N -> P if P points to a type of zero size. 3965 if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy) 3966 return Ops[0]; 3967 3968 // The following transforms are only safe if the ptrtoint cast 3969 // doesn't truncate the pointers. 
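      // For example, "gep i8, V, (sub (ptrtoint P), (ptrtoint V))" folds to P
      // only when the offset type is as wide as the pointer index type.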
3970 if (Ops[1]->getType()->getScalarSizeInBits() == 3971 Q.DL.getIndexSizeInBits(AS)) { 3972 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * { 3973 if (match(P, m_Zero())) 3974 return Constant::getNullValue(GEPTy); 3975 Value *Temp; 3976 if (match(P, m_PtrToInt(m_Value(Temp)))) 3977 if (Temp->getType() == GEPTy) 3978 return Temp; 3979 return nullptr; 3980 }; 3981 3982 // getelementptr V, (sub P, V) -> P if P points to a type of size 1. 3983 if (TyAllocSize == 1 && 3984 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))))) 3985 if (Value *R = PtrToIntOrZero(P)) 3986 return R; 3987 3988 // getelementptr V, (ashr (sub P, V), C) -> Q 3989 // if P points to a type of size 1 << C. 3990 if (match(Ops[1], 3991 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 3992 m_ConstantInt(C))) && 3993 TyAllocSize == 1ULL << C) 3994 if (Value *R = PtrToIntOrZero(P)) 3995 return R; 3996 3997 // getelementptr V, (sdiv (sub P, V), C) -> Q 3998 // if P points to a type of size C. 3999 if (match(Ops[1], 4000 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 4001 m_SpecificInt(TyAllocSize)))) 4002 if (Value *R = PtrToIntOrZero(P)) 4003 return R; 4004 } 4005 } 4006 } 4007 4008 if (Q.DL.getTypeAllocSize(LastType) == 1 && 4009 all_of(Ops.slice(1).drop_back(1), 4010 [](Value *Idx) { return match(Idx, m_Zero()); })) { 4011 unsigned IdxWidth = 4012 Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace()); 4013 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) { 4014 APInt BasePtrOffset(IdxWidth, 0); 4015 Value *StrippedBasePtr = 4016 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL, 4017 BasePtrOffset); 4018 4019 // gep (gep V, C), (sub 0, V) -> C 4020 if (match(Ops.back(), 4021 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) { 4022 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset); 4023 return ConstantExpr::getIntToPtr(CI, GEPTy); 4024 } 4025 // gep (gep V, C), (xor V, -1) -> C-1 4026 if (match(Ops.back(), 4027 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) { 4028 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1); 4029 return ConstantExpr::getIntToPtr(CI, GEPTy); 4030 } 4031 } 4032 } 4033 4034 // Check to see if this is constant foldable. 4035 if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); })) 4036 return nullptr; 4037 4038 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]), 4039 Ops.slice(1)); 4040 if (auto *CEFolded = ConstantFoldConstant(CE, Q.DL)) 4041 return CEFolded; 4042 return CE; 4043 } 4044 4045 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 4046 const SimplifyQuery &Q) { 4047 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit); 4048 } 4049 4050 /// Given operands for an InsertValueInst, see if we can fold the result. 4051 /// If not, this returns null. 
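/// For example, "insertvalue X, (extractvalue X, 0), 0" folds to X.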
4052 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val, 4053 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q, 4054 unsigned) { 4055 if (Constant *CAgg = dyn_cast<Constant>(Agg)) 4056 if (Constant *CVal = dyn_cast<Constant>(Val)) 4057 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs); 4058 4059 // insertvalue x, undef, n -> x 4060 if (match(Val, m_Undef())) 4061 return Agg; 4062 4063 // insertvalue x, (extractvalue y, n), n 4064 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val)) 4065 if (EV->getAggregateOperand()->getType() == Agg->getType() && 4066 EV->getIndices() == Idxs) { 4067 // insertvalue undef, (extractvalue y, n), n -> y 4068 if (match(Agg, m_Undef())) 4069 return EV->getAggregateOperand(); 4070 4071 // insertvalue y, (extractvalue y, n), n -> y 4072 if (Agg == EV->getAggregateOperand()) 4073 return Agg; 4074 } 4075 4076 return nullptr; 4077 } 4078 4079 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val, 4080 ArrayRef<unsigned> Idxs, 4081 const SimplifyQuery &Q) { 4082 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit); 4083 } 4084 4085 Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx, 4086 const SimplifyQuery &Q) { 4087 // Try to constant fold. 4088 auto *VecC = dyn_cast<Constant>(Vec); 4089 auto *ValC = dyn_cast<Constant>(Val); 4090 auto *IdxC = dyn_cast<Constant>(Idx); 4091 if (VecC && ValC && IdxC) 4092 return ConstantFoldInsertElementInstruction(VecC, ValC, IdxC); 4093 4094 // Fold into undef if index is out of bounds. 4095 if (auto *CI = dyn_cast<ConstantInt>(Idx)) { 4096 uint64_t NumElements = cast<VectorType>(Vec->getType())->getNumElements(); 4097 if (CI->uge(NumElements)) 4098 return UndefValue::get(Vec->getType()); 4099 } 4100 4101 // If index is undef, it might be out of bounds (see above case) 4102 if (isa<UndefValue>(Idx)) 4103 return UndefValue::get(Vec->getType()); 4104 4105 // Inserting an undef scalar? Assume it is the same value as the existing 4106 // vector element. 4107 if (isa<UndefValue>(Val)) 4108 return Vec; 4109 4110 // If we are extracting a value from a vector, then inserting it into the same 4111 // place, that's the input vector: 4112 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec 4113 if (match(Val, m_ExtractElement(m_Specific(Vec), m_Specific(Idx)))) 4114 return Vec; 4115 4116 return nullptr; 4117 } 4118 4119 /// Given operands for an ExtractValueInst, see if we can fold the result. 4120 /// If not, this returns null. 
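/// For example, "extractvalue (insertvalue X, Elt, 0), 0" folds to Elt.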
4121 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 4122 const SimplifyQuery &, unsigned) { 4123 if (auto *CAgg = dyn_cast<Constant>(Agg)) 4124 return ConstantFoldExtractValueInstruction(CAgg, Idxs); 4125 4126 // extractvalue x, (insertvalue y, elt, n), n -> elt 4127 unsigned NumIdxs = Idxs.size(); 4128 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr; 4129 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) { 4130 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices(); 4131 unsigned NumInsertValueIdxs = InsertValueIdxs.size(); 4132 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs); 4133 if (InsertValueIdxs.slice(0, NumCommonIdxs) == 4134 Idxs.slice(0, NumCommonIdxs)) { 4135 if (NumIdxs == NumInsertValueIdxs) 4136 return IVI->getInsertedValueOperand(); 4137 break; 4138 } 4139 } 4140 4141 return nullptr; 4142 } 4143 4144 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 4145 const SimplifyQuery &Q) { 4146 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit); 4147 } 4148 4149 /// Given operands for an ExtractElementInst, see if we can fold the result. 4150 /// If not, this returns null. 4151 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &, 4152 unsigned) { 4153 if (auto *CVec = dyn_cast<Constant>(Vec)) { 4154 if (auto *CIdx = dyn_cast<Constant>(Idx)) 4155 return ConstantFoldExtractElementInstruction(CVec, CIdx); 4156 4157 // The index is not relevant if our vector is a splat. 4158 if (auto *Splat = CVec->getSplatValue()) 4159 return Splat; 4160 4161 if (isa<UndefValue>(Vec)) 4162 return UndefValue::get(Vec->getType()->getVectorElementType()); 4163 } 4164 4165 // If extracting a specified index from the vector, see if we can recursively 4166 // find a previously computed scalar that was inserted into the vector. 4167 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) { 4168 if (IdxC->getValue().uge(Vec->getType()->getVectorNumElements())) 4169 // definitely out of bounds, thus undefined result 4170 return UndefValue::get(Vec->getType()->getVectorElementType()); 4171 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue())) 4172 return Elt; 4173 } 4174 4175 // An undef extract index can be arbitrarily chosen to be an out-of-range 4176 // index value, which would result in the instruction being undef. 4177 if (isa<UndefValue>(Idx)) 4178 return UndefValue::get(Vec->getType()->getVectorElementType()); 4179 4180 return nullptr; 4181 } 4182 4183 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx, 4184 const SimplifyQuery &Q) { 4185 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit); 4186 } 4187 4188 /// See if we can fold the given phi. If not, returns null. 4189 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) { 4190 // If all of the PHI's incoming values are the same then replace the PHI node 4191 // with the common value. 4192 Value *CommonValue = nullptr; 4193 bool HasUndefInput = false; 4194 for (Value *Incoming : PN->incoming_values()) { 4195 // If the incoming value is the phi node itself, it can safely be skipped. 4196 if (Incoming == PN) continue; 4197 if (isa<UndefValue>(Incoming)) { 4198 // Remember that we saw an undef value, but otherwise ignore them. 4199 HasUndefInput = true; 4200 continue; 4201 } 4202 if (CommonValue && Incoming != CommonValue) 4203 return nullptr; // Not the same, bail out. 
4204 CommonValue = Incoming; 4205 } 4206 4207 // If CommonValue is null then all of the incoming values were either undef or 4208 // equal to the phi node itself. 4209 if (!CommonValue) 4210 return UndefValue::get(PN->getType()); 4211 4212 // If we have a PHI node like phi(X, undef, X), where X is defined by some 4213 // instruction, we cannot return X as the result of the PHI node unless it 4214 // dominates the PHI block. 4215 if (HasUndefInput) 4216 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr; 4217 4218 return CommonValue; 4219 } 4220 4221 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op, 4222 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) { 4223 if (auto *C = dyn_cast<Constant>(Op)) 4224 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL); 4225 4226 if (auto *CI = dyn_cast<CastInst>(Op)) { 4227 auto *Src = CI->getOperand(0); 4228 Type *SrcTy = Src->getType(); 4229 Type *MidTy = CI->getType(); 4230 Type *DstTy = Ty; 4231 if (Src->getType() == Ty) { 4232 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode()); 4233 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc); 4234 Type *SrcIntPtrTy = 4235 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr; 4236 Type *MidIntPtrTy = 4237 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr; 4238 Type *DstIntPtrTy = 4239 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr; 4240 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy, 4241 SrcIntPtrTy, MidIntPtrTy, 4242 DstIntPtrTy) == Instruction::BitCast) 4243 return Src; 4244 } 4245 } 4246 4247 // bitcast x -> x 4248 if (CastOpc == Instruction::BitCast) 4249 if (Op->getType() == Ty) 4250 return Op; 4251 4252 return nullptr; 4253 } 4254 4255 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, 4256 const SimplifyQuery &Q) { 4257 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit); 4258 } 4259 4260 /// For the given destination element of a shuffle, peek through shuffles to 4261 /// match a root vector source operand that contains that element in the same 4262 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s). 4263 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, 4264 int MaskVal, Value *RootVec, 4265 unsigned MaxRecurse) { 4266 if (!MaxRecurse--) 4267 return nullptr; 4268 4269 // Bail out if any mask value is undefined. That kind of shuffle may be 4270 // simplified further based on demanded bits or other folds. 4271 if (MaskVal == -1) 4272 return nullptr; 4273 4274 // The mask value chooses which source operand we need to look at next. 4275 int InVecNumElts = Op0->getType()->getVectorNumElements(); 4276 int RootElt = MaskVal; 4277 Value *SourceOp = Op0; 4278 if (MaskVal >= InVecNumElts) { 4279 RootElt = MaskVal - InVecNumElts; 4280 SourceOp = Op1; 4281 } 4282 4283 // If the source operand is a shuffle itself, look through it to find the 4284 // matching root vector. 4285 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) { 4286 return foldIdentityShuffles( 4287 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1), 4288 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse); 4289 } 4290 4291 // TODO: Look through bitcasts? What if the bitcast changes the vector element 4292 // size? 4293 4294 // The source operand is not a shuffle. Initialize the root vector value for 4295 // this shuffle if that has not been done yet. 
4296 if (!RootVec) 4297 RootVec = SourceOp; 4298 4299 // Give up as soon as a source operand does not match the existing root value. 4300 if (RootVec != SourceOp) 4301 return nullptr; 4302 4303 // The element must be coming from the same lane in the source vector 4304 // (although it may have crossed lanes in intermediate shuffles). 4305 if (RootElt != DestElt) 4306 return nullptr; 4307 4308 return RootVec; 4309 } 4310 4311 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, 4312 Type *RetTy, const SimplifyQuery &Q, 4313 unsigned MaxRecurse) { 4314 if (isa<UndefValue>(Mask)) 4315 return UndefValue::get(RetTy); 4316 4317 Type *InVecTy = Op0->getType(); 4318 unsigned MaskNumElts = Mask->getType()->getVectorNumElements(); 4319 unsigned InVecNumElts = InVecTy->getVectorNumElements(); 4320 4321 SmallVector<int, 32> Indices; 4322 ShuffleVectorInst::getShuffleMask(Mask, Indices); 4323 assert(MaskNumElts == Indices.size() && 4324 "Size of Indices not same as number of mask elements?"); 4325 4326 // Canonicalization: If mask does not select elements from an input vector, 4327 // replace that input vector with undef. 4328 bool MaskSelects0 = false, MaskSelects1 = false; 4329 for (unsigned i = 0; i != MaskNumElts; ++i) { 4330 if (Indices[i] == -1) 4331 continue; 4332 if ((unsigned)Indices[i] < InVecNumElts) 4333 MaskSelects0 = true; 4334 else 4335 MaskSelects1 = true; 4336 } 4337 if (!MaskSelects0) 4338 Op0 = UndefValue::get(InVecTy); 4339 if (!MaskSelects1) 4340 Op1 = UndefValue::get(InVecTy); 4341 4342 auto *Op0Const = dyn_cast<Constant>(Op0); 4343 auto *Op1Const = dyn_cast<Constant>(Op1); 4344 4345 // If all operands are constant, constant fold the shuffle. 4346 if (Op0Const && Op1Const) 4347 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask); 4348 4349 // Canonicalization: if only one input vector is constant, it shall be the 4350 // second one. 4351 if (Op0Const && !Op1Const) { 4352 std::swap(Op0, Op1); 4353 ShuffleVectorInst::commuteShuffleMask(Indices, InVecNumElts); 4354 } 4355 4356 // A shuffle of a splat is always the splat itself. Legal if the shuffle's 4357 // value type is same as the input vectors' type. 4358 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0)) 4359 if (isa<UndefValue>(Op1) && RetTy == InVecTy && 4360 OpShuf->getMask()->getSplatValue()) 4361 return Op0; 4362 4363 // Don't fold a shuffle with undef mask elements. This may get folded in a 4364 // better way using demanded bits or other analysis. 4365 // TODO: Should we allow this? 4366 if (find(Indices, -1) != Indices.end()) 4367 return nullptr; 4368 4369 // Check if every element of this shuffle can be mapped back to the 4370 // corresponding element of a single root vector. If so, we don't need this 4371 // shuffle. This handles simple identity shuffles as well as chains of 4372 // shuffles that may widen/narrow and/or move elements across lanes and back. 4373 Value *RootVec = nullptr; 4374 for (unsigned i = 0; i != MaskNumElts; ++i) { 4375 // Note that recursion is limited for each vector element, so if any element 4376 // exceeds the limit, this will fail to simplify. 4377 RootVec = 4378 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse); 4379 4380 // We can't replace a widening/narrowing shuffle with one of its operands. 4381 if (!RootVec || RootVec->getType() != RetTy) 4382 return nullptr; 4383 } 4384 return RootVec; 4385 } 4386 4387 /// Given operands for a ShuffleVectorInst, fold the result or return null. 
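/// For example, an identity shuffle such as
/// "shufflevector X, undef, <0, 1, 2, 3>" folds to X.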
4388 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, 4389 Type *RetTy, const SimplifyQuery &Q) { 4390 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit); 4391 } 4392 4393 static Constant *foldConstant(Instruction::UnaryOps Opcode, 4394 Value *&Op, const SimplifyQuery &Q) { 4395 if (auto *C = dyn_cast<Constant>(Op)) 4396 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL); 4397 return nullptr; 4398 } 4399 4400 /// Given the operand for an FNeg, see if we can fold the result. If not, this 4401 /// returns null. 4402 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, 4403 const SimplifyQuery &Q, unsigned MaxRecurse) { 4404 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q)) 4405 return C; 4406 4407 Value *X; 4408 // fneg (fneg X) ==> X 4409 if (match(Op, m_FNeg(m_Value(X)))) 4410 return X; 4411 4412 return nullptr; 4413 } 4414 4415 Value *llvm::SimplifyFNegInst(Value *Op, FastMathFlags FMF, 4416 const SimplifyQuery &Q) { 4417 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit); 4418 } 4419 4420 static Constant *propagateNaN(Constant *In) { 4421 // If the input is a vector with undef elements, just return a default NaN. 4422 if (!In->isNaN()) 4423 return ConstantFP::getNaN(In->getType()); 4424 4425 // Propagate the existing NaN constant when possible. 4426 // TODO: Should we quiet a signaling NaN? 4427 return In; 4428 } 4429 4430 static Constant *simplifyFPBinop(Value *Op0, Value *Op1) { 4431 if (isa<UndefValue>(Op0) || isa<UndefValue>(Op1)) 4432 return ConstantFP::getNaN(Op0->getType()); 4433 4434 if (match(Op0, m_NaN())) 4435 return propagateNaN(cast<Constant>(Op0)); 4436 if (match(Op1, m_NaN())) 4437 return propagateNaN(cast<Constant>(Op1)); 4438 4439 return nullptr; 4440 } 4441 4442 /// Given operands for an FAdd, see if we can fold the result. If not, this 4443 /// returns null. 4444 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4445 const SimplifyQuery &Q, unsigned MaxRecurse) { 4446 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q)) 4447 return C; 4448 4449 if (Constant *C = simplifyFPBinop(Op0, Op1)) 4450 return C; 4451 4452 // fadd X, -0 ==> X 4453 if (match(Op1, m_NegZeroFP())) 4454 return Op0; 4455 4456 // fadd X, 0 ==> X, when we know X is not -0 4457 if (match(Op1, m_PosZeroFP()) && 4458 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI))) 4459 return Op0; 4460 4461 // With nnan: -X + X --> 0.0 (and commuted variant) 4462 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN. 
4463 // Negative zeros are allowed because we always end up with positive zero: 4464 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 4465 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 4466 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0 4467 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0 4468 if (FMF.noNaNs()) { 4469 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) || 4470 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0)))) 4471 return ConstantFP::getNullValue(Op0->getType()); 4472 4473 if (match(Op0, m_FNeg(m_Specific(Op1))) || 4474 match(Op1, m_FNeg(m_Specific(Op0)))) 4475 return ConstantFP::getNullValue(Op0->getType()); 4476 } 4477 4478 // (X - Y) + Y --> X 4479 // Y + (X - Y) --> X 4480 Value *X; 4481 if (FMF.noSignedZeros() && FMF.allowReassoc() && 4482 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) || 4483 match(Op1, m_FSub(m_Value(X), m_Specific(Op0))))) 4484 return X; 4485 4486 return nullptr; 4487 } 4488 4489 /// Given operands for an FSub, see if we can fold the result. If not, this 4490 /// returns null. 4491 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4492 const SimplifyQuery &Q, unsigned MaxRecurse) { 4493 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q)) 4494 return C; 4495 4496 if (Constant *C = simplifyFPBinop(Op0, Op1)) 4497 return C; 4498 4499 // fsub X, +0 ==> X 4500 if (match(Op1, m_PosZeroFP())) 4501 return Op0; 4502 4503 // fsub X, -0 ==> X, when we know X is not -0 4504 if (match(Op1, m_NegZeroFP()) && 4505 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI))) 4506 return Op0; 4507 4508 // fsub -0.0, (fsub -0.0, X) ==> X 4509 // fsub -0.0, (fneg X) ==> X 4510 Value *X; 4511 if (match(Op0, m_NegZeroFP()) && 4512 match(Op1, m_FNeg(m_Value(X)))) 4513 return X; 4514 4515 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored. 4516 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored. 4517 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) && 4518 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) || 4519 match(Op1, m_FNeg(m_Value(X))))) 4520 return X; 4521 4522 // fsub nnan x, x ==> 0.0 4523 if (FMF.noNaNs() && Op0 == Op1) 4524 return Constant::getNullValue(Op0->getType()); 4525 4526 // Y - (Y - X) --> X 4527 // (X + Y) - Y --> X 4528 if (FMF.noSignedZeros() && FMF.allowReassoc() && 4529 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) || 4530 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X))))) 4531 return X; 4532 4533 return nullptr; 4534 } 4535 4536 /// Given the operands for an FMul, see if we can fold the result 4537 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4538 const SimplifyQuery &Q, unsigned MaxRecurse) { 4539 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q)) 4540 return C; 4541 4542 if (Constant *C = simplifyFPBinop(Op0, Op1)) 4543 return C; 4544 4545 // fmul X, 1.0 ==> X 4546 if (match(Op1, m_FPOne())) 4547 return Op0; 4548 4549 // fmul nnan nsz X, 0 ==> 0 4550 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP())) 4551 return ConstantFP::getNullValue(Op0->getType()); 4552 4553 // sqrt(X) * sqrt(X) --> X, if we can: 4554 // 1. Remove the intermediate rounding (reassociate). 4555 // 2. Ignore non-zero negative numbers because sqrt would produce NAN. 4556 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0. 
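  // For example, "fmul reassoc nnan nsz (sqrt X), (sqrt X)" folds to X.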
4557 Value *X; 4558 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && 4559 FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros()) 4560 return X; 4561 4562 return nullptr; 4563 } 4564 4565 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4566 const SimplifyQuery &Q) { 4567 return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit); 4568 } 4569 4570 4571 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4572 const SimplifyQuery &Q) { 4573 return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit); 4574 } 4575 4576 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4577 const SimplifyQuery &Q) { 4578 return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit); 4579 } 4580 4581 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4582 const SimplifyQuery &Q, unsigned) { 4583 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q)) 4584 return C; 4585 4586 if (Constant *C = simplifyFPBinop(Op0, Op1)) 4587 return C; 4588 4589 // X / 1.0 -> X 4590 if (match(Op1, m_FPOne())) 4591 return Op0; 4592 4593 // 0 / X -> 0 4594 // Requires that NaNs are off (X could be zero) and signed zeroes are 4595 // ignored (X could be positive or negative, so the output sign is unknown). 4596 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP())) 4597 return ConstantFP::getNullValue(Op0->getType()); 4598 4599 if (FMF.noNaNs()) { 4600 // X / X -> 1.0 is legal when NaNs are ignored. 4601 // We can ignore infinities because INF/INF is NaN. 4602 if (Op0 == Op1) 4603 return ConstantFP::get(Op0->getType(), 1.0); 4604 4605 // (X * Y) / Y --> X if we can reassociate to the above form. 4606 Value *X; 4607 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1)))) 4608 return X; 4609 4610 // -X / X -> -1.0 and 4611 // X / -X -> -1.0 are legal when NaNs are ignored. 4612 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored. 4613 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) || 4614 match(Op1, m_FNegNSZ(m_Specific(Op0)))) 4615 return ConstantFP::get(Op0->getType(), -1.0); 4616 } 4617 4618 return nullptr; 4619 } 4620 4621 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4622 const SimplifyQuery &Q) { 4623 return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit); 4624 } 4625 4626 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4627 const SimplifyQuery &Q, unsigned) { 4628 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q)) 4629 return C; 4630 4631 if (Constant *C = simplifyFPBinop(Op0, Op1)) 4632 return C; 4633 4634 // Unlike fdiv, the result of frem always matches the sign of the dividend. 4635 // The constant match may include undef elements in a vector, so return a full 4636 // zero constant as the result. 4637 if (FMF.noNaNs()) { 4638 // +0 % X -> 0 4639 if (match(Op0, m_PosZeroFP())) 4640 return ConstantFP::getNullValue(Op0->getType()); 4641 // -0 % X -> -0 4642 if (match(Op0, m_NegZeroFP())) 4643 return ConstantFP::getNegativeZero(Op0->getType()); 4644 } 4645 4646 return nullptr; 4647 } 4648 4649 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4650 const SimplifyQuery &Q) { 4651 return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit); 4652 } 4653 4654 //=== Helper functions for higher up the class hierarchy. 4655 4656 /// Given the operand for a UnaryOperator, see if we can fold the result. 4657 /// If not, this returns null. 
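/// FNeg is currently the only unary opcode handled here, and it is dispatched
/// with default (empty) FastMathFlags; callers that do have flags should use
/// simplifyFPUnOp below instead.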
4658 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q, 4659 unsigned MaxRecurse) { 4660 switch (Opcode) { 4661 case Instruction::FNeg: 4662 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse); 4663 default: 4664 llvm_unreachable("Unexpected opcode"); 4665 } 4666 } 4667 4668 /// Given the operand for a UnaryOperator, see if we can fold the result. 4669 /// If not, this returns null. 4670 /// Try to use FastMathFlags when folding the result. 4671 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op, 4672 const FastMathFlags &FMF, 4673 const SimplifyQuery &Q, unsigned MaxRecurse) { 4674 switch (Opcode) { 4675 case Instruction::FNeg: 4676 return simplifyFNegInst(Op, FMF, Q, MaxRecurse); 4677 default: 4678 return simplifyUnOp(Opcode, Op, Q, MaxRecurse); 4679 } 4680 } 4681 4682 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) { 4683 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit); 4684 } 4685 4686 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF, 4687 const SimplifyQuery &Q) { 4688 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit); 4689 } 4690 4691 /// Given operands for a BinaryOperator, see if we can fold the result. 4692 /// If not, this returns null. 4693 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4694 const SimplifyQuery &Q, unsigned MaxRecurse) { 4695 switch (Opcode) { 4696 case Instruction::Add: 4697 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse); 4698 case Instruction::Sub: 4699 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse); 4700 case Instruction::Mul: 4701 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse); 4702 case Instruction::SDiv: 4703 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse); 4704 case Instruction::UDiv: 4705 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse); 4706 case Instruction::SRem: 4707 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse); 4708 case Instruction::URem: 4709 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse); 4710 case Instruction::Shl: 4711 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse); 4712 case Instruction::LShr: 4713 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse); 4714 case Instruction::AShr: 4715 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse); 4716 case Instruction::And: 4717 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse); 4718 case Instruction::Or: 4719 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse); 4720 case Instruction::Xor: 4721 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse); 4722 case Instruction::FAdd: 4723 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4724 case Instruction::FSub: 4725 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4726 case Instruction::FMul: 4727 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4728 case Instruction::FDiv: 4729 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4730 case Instruction::FRem: 4731 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4732 default: 4733 llvm_unreachable("Unexpected opcode"); 4734 } 4735 } 4736 4737 /// Given operands for a BinaryOperator, see if we can fold the result. 4738 /// If not, this returns null. 4739 /// Try to use FastMathFlags when folding the result. 
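/// Only FAdd, FSub, FMul and FDiv consult the provided flags here; every other
/// opcode (including FRem) falls through to the flag-free overload above.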
4740 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4741 const FastMathFlags &FMF, const SimplifyQuery &Q, 4742 unsigned MaxRecurse) { 4743 switch (Opcode) { 4744 case Instruction::FAdd: 4745 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse); 4746 case Instruction::FSub: 4747 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse); 4748 case Instruction::FMul: 4749 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse); 4750 case Instruction::FDiv: 4751 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse); 4752 default: 4753 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse); 4754 } 4755 } 4756 4757 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4758 const SimplifyQuery &Q) { 4759 return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit); 4760 } 4761 4762 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4763 FastMathFlags FMF, const SimplifyQuery &Q) { 4764 return ::SimplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit); 4765 } 4766 4767 /// Given operands for a CmpInst, see if we can fold the result. 4768 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4769 const SimplifyQuery &Q, unsigned MaxRecurse) { 4770 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate)) 4771 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse); 4772 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4773 } 4774 4775 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4776 const SimplifyQuery &Q) { 4777 return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit); 4778 } 4779 4780 static bool IsIdempotent(Intrinsic::ID ID) { 4781 switch (ID) { 4782 default: return false; 4783 4784 // Unary idempotent: f(f(x)) = f(x) 4785 case Intrinsic::fabs: 4786 case Intrinsic::floor: 4787 case Intrinsic::ceil: 4788 case Intrinsic::trunc: 4789 case Intrinsic::rint: 4790 case Intrinsic::nearbyint: 4791 case Intrinsic::round: 4792 case Intrinsic::canonicalize: 4793 return true; 4794 } 4795 } 4796 4797 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset, 4798 const DataLayout &DL) { 4799 GlobalValue *PtrSym; 4800 APInt PtrOffset; 4801 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL)) 4802 return nullptr; 4803 4804 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext()); 4805 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext()); 4806 Type *Int32PtrTy = Int32Ty->getPointerTo(); 4807 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext()); 4808 4809 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset); 4810 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64) 4811 return nullptr; 4812 4813 uint64_t OffsetInt = OffsetConstInt->getSExtValue(); 4814 if (OffsetInt % 4 != 0) 4815 return nullptr; 4816 4817 Constant *C = ConstantExpr::getGetElementPtr( 4818 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy), 4819 ConstantInt::get(Int64Ty, OffsetInt / 4)); 4820 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL); 4821 if (!Loaded) 4822 return nullptr; 4823 4824 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded); 4825 if (!LoadedCE) 4826 return nullptr; 4827 4828 if (LoadedCE->getOpcode() == Instruction::Trunc) { 4829 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 4830 if (!LoadedCE) 4831 return nullptr; 4832 } 4833 4834 if (LoadedCE->getOpcode() != Instruction::Sub) 4835 return nullptr; 4836 4837 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 4838 if (!LoadedLHS || LoadedLHS->getOpcode() != 
Instruction::PtrToInt) 4839 return nullptr; 4840 auto *LoadedLHSPtr = LoadedLHS->getOperand(0); 4841 4842 Constant *LoadedRHS = LoadedCE->getOperand(1); 4843 GlobalValue *LoadedRHSSym; 4844 APInt LoadedRHSOffset; 4845 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset, 4846 DL) || 4847 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset) 4848 return nullptr; 4849 4850 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy); 4851 } 4852 4853 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0, 4854 const SimplifyQuery &Q) { 4855 // Idempotent functions return the same result when called repeatedly. 4856 Intrinsic::ID IID = F->getIntrinsicID(); 4857 if (IsIdempotent(IID)) 4858 if (auto *II = dyn_cast<IntrinsicInst>(Op0)) 4859 if (II->getIntrinsicID() == IID) 4860 return II; 4861 4862 Value *X; 4863 switch (IID) { 4864 case Intrinsic::fabs: 4865 if (SignBitMustBeZero(Op0, Q.TLI)) return Op0; 4866 break; 4867 case Intrinsic::bswap: 4868 // bswap(bswap(x)) -> x 4869 if (match(Op0, m_BSwap(m_Value(X)))) return X; 4870 break; 4871 case Intrinsic::bitreverse: 4872 // bitreverse(bitreverse(x)) -> x 4873 if (match(Op0, m_BitReverse(m_Value(X)))) return X; 4874 break; 4875 case Intrinsic::exp: 4876 // exp(log(x)) -> x 4877 if (Q.CxtI->hasAllowReassoc() && 4878 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X)))) return X; 4879 break; 4880 case Intrinsic::exp2: 4881 // exp2(log2(x)) -> x 4882 if (Q.CxtI->hasAllowReassoc() && 4883 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X)))) return X; 4884 break; 4885 case Intrinsic::log: 4886 // log(exp(x)) -> x 4887 if (Q.CxtI->hasAllowReassoc() && 4888 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X)))) return X; 4889 break; 4890 case Intrinsic::log2: 4891 // log2(exp2(x)) -> x 4892 if (Q.CxtI->hasAllowReassoc() && 4893 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) || 4894 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), 4895 m_Value(X))))) return X; 4896 break; 4897 case Intrinsic::log10: 4898 // log10(pow(10.0, x)) -> x 4899 if (Q.CxtI->hasAllowReassoc() && 4900 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), 4901 m_Value(X)))) return X; 4902 break; 4903 case Intrinsic::floor: 4904 case Intrinsic::trunc: 4905 case Intrinsic::ceil: 4906 case Intrinsic::round: 4907 case Intrinsic::nearbyint: 4908 case Intrinsic::rint: { 4909 // floor (sitofp x) -> sitofp x 4910 // floor (uitofp x) -> uitofp x 4911 // 4912 // Converting from int always results in a finite integral number or 4913 // infinity. For either of those inputs, these rounding functions always 4914 // return the same value, so the rounding can be eliminated. 
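    // For example (illustrative IR):
    //   %f = sitofp i32 %i to float
    //   %r = call float @llvm.floor.f32(float %f)   ; simplifies to %f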
4915 if (match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value()))) 4916 return Op0; 4917 break; 4918 } 4919 default: 4920 break; 4921 } 4922 4923 return nullptr; 4924 } 4925 4926 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1, 4927 const SimplifyQuery &Q) { 4928 Intrinsic::ID IID = F->getIntrinsicID(); 4929 Type *ReturnType = F->getReturnType(); 4930 switch (IID) { 4931 case Intrinsic::usub_with_overflow: 4932 case Intrinsic::ssub_with_overflow: 4933 // X - X -> { 0, false } 4934 if (Op0 == Op1) 4935 return Constant::getNullValue(ReturnType); 4936 LLVM_FALLTHROUGH; 4937 case Intrinsic::uadd_with_overflow: 4938 case Intrinsic::sadd_with_overflow: 4939 // X - undef -> { undef, false } 4940 // undef - X -> { undef, false } 4941 // X + undef -> { undef, false } 4942 // undef + x -> { undef, false } 4943 if (isa<UndefValue>(Op0) || isa<UndefValue>(Op1)) { 4944 return ConstantStruct::get( 4945 cast<StructType>(ReturnType), 4946 {UndefValue::get(ReturnType->getStructElementType(0)), 4947 Constant::getNullValue(ReturnType->getStructElementType(1))}); 4948 } 4949 break; 4950 case Intrinsic::umul_with_overflow: 4951 case Intrinsic::smul_with_overflow: 4952 // 0 * X -> { 0, false } 4953 // X * 0 -> { 0, false } 4954 if (match(Op0, m_Zero()) || match(Op1, m_Zero())) 4955 return Constant::getNullValue(ReturnType); 4956 // undef * X -> { 0, false } 4957 // X * undef -> { 0, false } 4958 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 4959 return Constant::getNullValue(ReturnType); 4960 break; 4961 case Intrinsic::uadd_sat: 4962 // sat(MAX + X) -> MAX 4963 // sat(X + MAX) -> MAX 4964 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes())) 4965 return Constant::getAllOnesValue(ReturnType); 4966 LLVM_FALLTHROUGH; 4967 case Intrinsic::sadd_sat: 4968 // sat(X + undef) -> -1 4969 // sat(undef + X) -> -1 4970 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1). 4971 // For signed: Assume undef is ~X, in which case X + ~X = -1. 4972 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 4973 return Constant::getAllOnesValue(ReturnType); 4974 4975 // X + 0 -> X 4976 if (match(Op1, m_Zero())) 4977 return Op0; 4978 // 0 + X -> X 4979 if (match(Op0, m_Zero())) 4980 return Op1; 4981 break; 4982 case Intrinsic::usub_sat: 4983 // sat(0 - X) -> 0, sat(X - MAX) -> 0 4984 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes())) 4985 return Constant::getNullValue(ReturnType); 4986 LLVM_FALLTHROUGH; 4987 case Intrinsic::ssub_sat: 4988 // X - X -> 0, X - undef -> 0, undef - X -> 0 4989 if (Op0 == Op1 || match(Op0, m_Undef()) || match(Op1, m_Undef())) 4990 return Constant::getNullValue(ReturnType); 4991 // X - 0 -> X 4992 if (match(Op1, m_Zero())) 4993 return Op0; 4994 break; 4995 case Intrinsic::load_relative: 4996 if (auto *C0 = dyn_cast<Constant>(Op0)) 4997 if (auto *C1 = dyn_cast<Constant>(Op1)) 4998 return SimplifyRelativeLoad(C0, C1, Q.DL); 4999 break; 5000 case Intrinsic::powi: 5001 if (auto *Power = dyn_cast<ConstantInt>(Op1)) { 5002 // powi(x, 0) -> 1.0 5003 if (Power->isZero()) 5004 return ConstantFP::get(Op0->getType(), 1.0); 5005 // powi(x, 1) -> x 5006 if (Power->isOne()) 5007 return Op0; 5008 } 5009 break; 5010 case Intrinsic::maxnum: 5011 case Intrinsic::minnum: 5012 case Intrinsic::maximum: 5013 case Intrinsic::minimum: { 5014 // If the arguments are the same, this is a no-op. 5015 if (Op0 == Op1) return Op0; 5016 5017 // If one argument is undef, return the other argument. 
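    // (undef may be chosen to be equal to the other operand, so for all four
    // intrinsics the result is simply the other operand.)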
5018 if (match(Op0, m_Undef())) 5019 return Op1; 5020 if (match(Op1, m_Undef())) 5021 return Op0; 5022 5023 // If one argument is NaN, return other or NaN appropriately. 5024 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum; 5025 if (match(Op0, m_NaN())) 5026 return PropagateNaN ? Op0 : Op1; 5027 if (match(Op1, m_NaN())) 5028 return PropagateNaN ? Op1 : Op0; 5029 5030 // Min/max of the same operation with common operand: 5031 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants) 5032 if (auto *M0 = dyn_cast<IntrinsicInst>(Op0)) 5033 if (M0->getIntrinsicID() == IID && 5034 (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1)) 5035 return Op0; 5036 if (auto *M1 = dyn_cast<IntrinsicInst>(Op1)) 5037 if (M1->getIntrinsicID() == IID && 5038 (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0)) 5039 return Op1; 5040 5041 // min(X, -Inf) --> -Inf (and commuted variant) 5042 // max(X, +Inf) --> +Inf (and commuted variant) 5043 bool UseNegInf = IID == Intrinsic::minnum || IID == Intrinsic::minimum; 5044 const APFloat *C; 5045 if ((match(Op0, m_APFloat(C)) && C->isInfinity() && 5046 C->isNegative() == UseNegInf) || 5047 (match(Op1, m_APFloat(C)) && C->isInfinity() && 5048 C->isNegative() == UseNegInf)) 5049 return ConstantFP::getInfinity(ReturnType, UseNegInf); 5050 5051 // TODO: minnum(nnan x, inf) -> x 5052 // TODO: minnum(nnan ninf x, flt_max) -> x 5053 // TODO: maxnum(nnan x, -inf) -> x 5054 // TODO: maxnum(nnan ninf x, -flt_max) -> x 5055 break; 5056 } 5057 default: 5058 break; 5059 } 5060 5061 return nullptr; 5062 } 5063 5064 static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) { 5065 5066 // Intrinsics with no operands have some kind of side effect. Don't simplify. 5067 unsigned NumOperands = Call->getNumArgOperands(); 5068 if (!NumOperands) 5069 return nullptr; 5070 5071 Function *F = cast<Function>(Call->getCalledFunction()); 5072 Intrinsic::ID IID = F->getIntrinsicID(); 5073 if (NumOperands == 1) 5074 return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q); 5075 5076 if (NumOperands == 2) 5077 return simplifyBinaryIntrinsic(F, Call->getArgOperand(0), 5078 Call->getArgOperand(1), Q); 5079 5080 // Handle intrinsics with 3 or more arguments. 5081 switch (IID) { 5082 case Intrinsic::masked_load: 5083 case Intrinsic::masked_gather: { 5084 Value *MaskArg = Call->getArgOperand(2); 5085 Value *PassthruArg = Call->getArgOperand(3); 5086 // If the mask is all zeros or undef, the "passthru" argument is the result. 5087 if (maskIsAllZeroOrUndef(MaskArg)) 5088 return PassthruArg; 5089 return nullptr; 5090 } 5091 case Intrinsic::fshl: 5092 case Intrinsic::fshr: { 5093 Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1), 5094 *ShAmtArg = Call->getArgOperand(2); 5095 5096 // If both operands are undef, the result is undef. 5097 if (match(Op0, m_Undef()) && match(Op1, m_Undef())) 5098 return UndefValue::get(F->getReturnType()); 5099 5100 // If shift amount is undef, assume it is zero. 5101 if (match(ShAmtArg, m_Undef())) 5102 return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1); 5103 5104 const APInt *ShAmtC; 5105 if (match(ShAmtArg, m_APInt(ShAmtC))) { 5106 // If there's effectively no shift, return the 1st arg or 2nd arg. 5107 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth()); 5108 if (ShAmtC->urem(BitWidth).isNullValue()) 5109 return Call->getArgOperand(IID == Intrinsic::fshl ? 
0 : 1); 5110 } 5111 return nullptr; 5112 } 5113 default: 5114 return nullptr; 5115 } 5116 } 5117 5118 Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) { 5119 Value *Callee = Call->getCalledValue(); 5120 5121 // call undef -> undef 5122 // call null -> undef 5123 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee)) 5124 return UndefValue::get(Call->getType()); 5125 5126 Function *F = dyn_cast<Function>(Callee); 5127 if (!F) 5128 return nullptr; 5129 5130 if (F->isIntrinsic()) 5131 if (Value *Ret = simplifyIntrinsic(Call, Q)) 5132 return Ret; 5133 5134 if (!canConstantFoldCallTo(Call, F)) 5135 return nullptr; 5136 5137 SmallVector<Constant *, 4> ConstantArgs; 5138 unsigned NumArgs = Call->getNumArgOperands(); 5139 ConstantArgs.reserve(NumArgs); 5140 for (auto &Arg : Call->args()) { 5141 Constant *C = dyn_cast<Constant>(&Arg); 5142 if (!C) 5143 return nullptr; 5144 ConstantArgs.push_back(C); 5145 } 5146 5147 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI); 5148 } 5149 5150 /// See if we can compute a simplified version of this instruction. 5151 /// If not, this returns null. 5152 5153 Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ, 5154 OptimizationRemarkEmitter *ORE) { 5155 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I); 5156 Value *Result; 5157 5158 switch (I->getOpcode()) { 5159 default: 5160 Result = ConstantFoldInstruction(I, Q.DL, Q.TLI); 5161 break; 5162 case Instruction::FNeg: 5163 Result = SimplifyFNegInst(I->getOperand(0), I->getFastMathFlags(), Q); 5164 break; 5165 case Instruction::FAdd: 5166 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1), 5167 I->getFastMathFlags(), Q); 5168 break; 5169 case Instruction::Add: 5170 Result = 5171 SimplifyAddInst(I->getOperand(0), I->getOperand(1), 5172 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5173 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5174 break; 5175 case Instruction::FSub: 5176 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1), 5177 I->getFastMathFlags(), Q); 5178 break; 5179 case Instruction::Sub: 5180 Result = 5181 SimplifySubInst(I->getOperand(0), I->getOperand(1), 5182 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5183 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5184 break; 5185 case Instruction::FMul: 5186 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1), 5187 I->getFastMathFlags(), Q); 5188 break; 5189 case Instruction::Mul: 5190 Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q); 5191 break; 5192 case Instruction::SDiv: 5193 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q); 5194 break; 5195 case Instruction::UDiv: 5196 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q); 5197 break; 5198 case Instruction::FDiv: 5199 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), 5200 I->getFastMathFlags(), Q); 5201 break; 5202 case Instruction::SRem: 5203 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q); 5204 break; 5205 case Instruction::URem: 5206 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q); 5207 break; 5208 case Instruction::FRem: 5209 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), 5210 I->getFastMathFlags(), Q); 5211 break; 5212 case Instruction::Shl: 5213 Result = 5214 SimplifyShlInst(I->getOperand(0), I->getOperand(1), 5215 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5216 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5217 break; 5218 case Instruction::LShr: 5219 Result = 
SimplifyLShrInst(I->getOperand(0), I->getOperand(1), 5220 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q); 5221 break; 5222 case Instruction::AShr: 5223 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1), 5224 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q); 5225 break; 5226 case Instruction::And: 5227 Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q); 5228 break; 5229 case Instruction::Or: 5230 Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q); 5231 break; 5232 case Instruction::Xor: 5233 Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q); 5234 break; 5235 case Instruction::ICmp: 5236 Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), 5237 I->getOperand(0), I->getOperand(1), Q); 5238 break; 5239 case Instruction::FCmp: 5240 Result = 5241 SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0), 5242 I->getOperand(1), I->getFastMathFlags(), Q); 5243 break; 5244 case Instruction::Select: 5245 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1), 5246 I->getOperand(2), Q); 5247 break; 5248 case Instruction::GetElementPtr: { 5249 SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end()); 5250 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(), 5251 Ops, Q); 5252 break; 5253 } 5254 case Instruction::InsertValue: { 5255 InsertValueInst *IV = cast<InsertValueInst>(I); 5256 Result = SimplifyInsertValueInst(IV->getAggregateOperand(), 5257 IV->getInsertedValueOperand(), 5258 IV->getIndices(), Q); 5259 break; 5260 } 5261 case Instruction::InsertElement: { 5262 auto *IE = cast<InsertElementInst>(I); 5263 Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1), 5264 IE->getOperand(2), Q); 5265 break; 5266 } 5267 case Instruction::ExtractValue: { 5268 auto *EVI = cast<ExtractValueInst>(I); 5269 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(), 5270 EVI->getIndices(), Q); 5271 break; 5272 } 5273 case Instruction::ExtractElement: { 5274 auto *EEI = cast<ExtractElementInst>(I); 5275 Result = SimplifyExtractElementInst(EEI->getVectorOperand(), 5276 EEI->getIndexOperand(), Q); 5277 break; 5278 } 5279 case Instruction::ShuffleVector: { 5280 auto *SVI = cast<ShuffleVectorInst>(I); 5281 Result = SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), 5282 SVI->getMask(), SVI->getType(), Q); 5283 break; 5284 } 5285 case Instruction::PHI: 5286 Result = SimplifyPHINode(cast<PHINode>(I), Q); 5287 break; 5288 case Instruction::Call: { 5289 Result = SimplifyCall(cast<CallInst>(I), Q); 5290 break; 5291 } 5292 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc: 5293 #include "llvm/IR/Instruction.def" 5294 #undef HANDLE_CAST_INST 5295 Result = 5296 SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q); 5297 break; 5298 case Instruction::Alloca: 5299 // No simplifications for Alloca and it can't be constant folded. 5300 Result = nullptr; 5301 break; 5302 } 5303 5304 // In general, it is possible for computeKnownBits to determine all bits in a 5305 // value even when the operands are not all constants. 5306 if (!Result && I->getType()->isIntOrIntVectorTy()) { 5307 KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE); 5308 if (Known.isConstant()) 5309 Result = ConstantInt::get(I->getType(), Known.getConstant()); 5310 } 5311 5312 /// If called on unreachable code, the above logic may report that the 5313 /// instruction simplified to itself. Make life easier for users by 5314 /// detecting that case here, returning a safe value instead. 
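  /// For example, the necessarily-unreachable self-referential instruction
  /// "%x = add i32 %x, 0" folds to %x, i.e. to the instruction itself.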
5315   return Result == I ? UndefValue::get(I->getType()) : Result;
5316 }
5317 
5318 /// Implementation of recursive simplification through an instruction's
5319 /// uses.
5320 ///
5321 /// This is the common implementation of the recursive simplification routines.
5322 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
5323 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
5324 /// instructions to process and attempt to simplify it using
5325 /// InstructionSimplify. Recursively visited users which could not be
5326 /// simplified themselves are added to the optional UnsimplifiedUsers set for
5327 /// further processing by the caller.
5328 ///
5329 /// This routine returns 'true' only when *it* simplifies something. The passed
5330 /// in simplified value does not count toward this.
5331 static bool replaceAndRecursivelySimplifyImpl(
5332     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
5333     const DominatorTree *DT, AssumptionCache *AC,
5334     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
5335   bool Simplified = false;
5336   SmallSetVector<Instruction *, 8> Worklist;
5337   const DataLayout &DL = I->getModule()->getDataLayout();
5338 
5339   // If we have an explicit value to collapse to, do that round of the
5340   // simplification loop by hand initially.
5341   if (SimpleV) {
5342     for (User *U : I->users())
5343       if (U != I)
5344         Worklist.insert(cast<Instruction>(U));
5345 
5346     // Replace the instruction with its simplified value.
5347     I->replaceAllUsesWith(SimpleV);
5348 
5349     // Gracefully handle edge cases where the instruction is not wired into any
5350     // parent block.
5351     if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
5352         !I->mayHaveSideEffects())
5353       I->eraseFromParent();
5354   } else {
5355     Worklist.insert(I);
5356   }
5357 
5358   // Note that we must test the size on each iteration, the worklist can grow.
5359   for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
5360     I = Worklist[Idx];
5361 
5362     // See if this instruction simplifies.
5363     SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
5364     if (!SimpleV) {
5365       if (UnsimplifiedUsers)
5366         UnsimplifiedUsers->insert(I);
5367       continue;
5368     }
5369 
5370     Simplified = true;
5371 
5372     // Stash away all the uses of the old instruction so we can check them for
5373     // recursive simplifications after a RAUW. This is cheaper than checking all
5374     // uses of To on the recursive step in most cases.
5375     for (User *U : I->users())
5376       Worklist.insert(cast<Instruction>(U));
5377 
5378     // Replace the instruction with its simplified value.
5379     I->replaceAllUsesWith(SimpleV);
5380 
5381     // Gracefully handle edge cases where the instruction is not wired into any
5382     // parent block.
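    // (As in the pre-simplified case above: side-effecting instructions, EH
    // pads and terminators are left in place; only their uses are replaced.)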
5383 if (I->getParent() && !I->isEHPad() && !I->isTerminator() && 5384 !I->mayHaveSideEffects()) 5385 I->eraseFromParent(); 5386 } 5387 return Simplified; 5388 } 5389 5390 bool llvm::recursivelySimplifyInstruction(Instruction *I, 5391 const TargetLibraryInfo *TLI, 5392 const DominatorTree *DT, 5393 AssumptionCache *AC) { 5394 return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC, nullptr); 5395 } 5396 5397 bool llvm::replaceAndRecursivelySimplify( 5398 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, 5399 const DominatorTree *DT, AssumptionCache *AC, 5400 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) { 5401 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!"); 5402 assert(SimpleV && "Must provide a simplified value."); 5403 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC, 5404 UnsimplifiedUsers); 5405 } 5406 5407 namespace llvm { 5408 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) { 5409 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>(); 5410 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr; 5411 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 5412 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr; 5413 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>(); 5414 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr; 5415 return {F.getParent()->getDataLayout(), TLI, DT, AC}; 5416 } 5417 5418 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR, 5419 const DataLayout &DL) { 5420 return {DL, &AR.TLI, &AR.DT, &AR.AC}; 5421 } 5422 5423 template <class T, class... TArgs> 5424 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM, 5425 Function &F) { 5426 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F); 5427 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F); 5428 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F); 5429 return {F.getParent()->getDataLayout(), TLI, DT, AC}; 5430 } 5431 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &, 5432 Function &); 5433 } 5434
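
// Illustrative usage sketch (not part of this file's API): a legacy pass would
// typically build a SimplifyQuery once and then fold instructions as it visits
// them, roughly:
//   const SimplifyQuery SQ = getBestSimplifyQuery(P, F);
//   for (Instruction &I : instructions(F))   // from llvm/IR/InstIterator.h
//     if (Value *V = SimplifyInstruction(&I, SQ))
//       I.replaceAllUsesWith(V);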