//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: this is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//
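//
// Typical usage (an illustrative sketch): a caller hands an instruction's
// operands to the matching Simplify* entry point and, if a simpler existing
// value comes back, replaces all uses of the instruction with it:
//
//   // Hypothetical caller; Q is a SimplifyQuery built by the calling pass.
//   if (Value *V = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
//                                  /*IsNSW=*/false, /*IsNUW=*/false, Q))
//     I->replaceAllUsesWith(V);
//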

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
                               unsigned);
static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
                              unsigned);

static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
                                     Value *FalseVal) {
  BinaryOperator::BinaryOps BinOpCode;
  if (auto *BO = dyn_cast<BinaryOperator>(Cond))
    BinOpCode = BO->getOpcode();
  else
    return nullptr;

  CmpInst::Predicate ExpectedPred, Pred1, Pred2;
  if (BinOpCode == BinaryOperator::Or) {
    ExpectedPred = ICmpInst::ICMP_NE;
  } else if (BinOpCode == BinaryOperator::And) {
    ExpectedPred = ICmpInst::ICMP_EQ;
  } else
    return nullptr;

  // %A = icmp eq %TV, %FV
  // %B = icmp eq %X, %Y (and one of these is a select operand)
  // %C = and %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %FV

  // %A = icmp ne %TV, %FV
  // %B = icmp ne %X, %Y (and one of these is a select operand)
  // %C = or %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %TV
  Value *X, *Y;
  if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
                                      m_Specific(FalseVal)),
                             m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
      Pred1 != Pred2 || Pred1 != ExpectedPred)
    return nullptr;

  if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
    return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;

  return nullptr;
}
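
// A concrete IR instance of the "or" form above (illustrative):
//   %a = icmp ne i32 %tv, %fv
//   %b = icmp ne i32 %x, %tv
//   %c = or i1 %a, %b
//   %d = select i1 %c, i32 %tv, i32 %fv   ; --> %tv
// If %c is false then %tv == %fv and either value may be returned; if %c is
// true the select picks %tv directly.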

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) {
  return ConstantInt::getFalse(Ty);
}

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) {
  return ConstantInt::getTrue(Ty);
}

/// Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}

/// Simplify comparison with true or false branch of select:
///   %sel = select i1 %cond, i32 %tv, i32 %fv
///   %cmp = icmp sle i32 %sel, %rhs
/// Compose a new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
                                 Value *RHS, Value *Cond,
                                 const SimplifyQuery &Q, unsigned MaxRecurse,
                                 Constant *TrueOrFalse) {
  Value *SimplifiedCmp = SimplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if the composed comparison is equivalent
    // to the select condition (%cond) then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select.
static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
                                     Value *RHS, Value *Cond,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select.
static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
                                      Value *RHS, Value *Cond,
                                      const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know the comparison with both branches of select can be simplified, but
/// the results are not equal. This routine handles some logical
/// simplifications.
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  if (match(FCmp, m_Zero()))
    if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()))
    if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = SimplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}
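
// Net effect of the helpers above on a common pattern (illustrative):
//   %sel = select i1 %c, i32 1, i32 0
//   %r = icmp ne i32 %sel, 0   ; true branch gives true, false branch gives
//                              ; false, so %r simplifies to %c itself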

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getFunction())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent() == &I->getFunction()->getEntryBlock() &&
      !isa<InvokeInst>(I))
    return true;

  return false;
}

/// Simplify "A op (B op' C)" by distributing op over op', turning it into
/// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
/// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
/// Returns the simplified value, or null if no simplification was performed.
static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
                          Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Check whether the expression has the form "(A op' B) op C".
  if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
    if (Op0->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op C) op' (B op C)".
      Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
      // Do "A op C" and "B op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
          if ((L == A && R == B) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == B &&
               R == A)) {
            ++NumExpand;
            return LHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  // Check whether the expression has the form "A op (B op' C)".
  if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
    if (Op1->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op B) op' (A op C)".
      Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
      // Do "A op B" and "A op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
          if ((L == B && R == C) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == C &&
               R == B)) {
            ++NumExpand;
            return RHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  return nullptr;
}
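
// A (contrived) case where the expansion fires, with "op" = mul and
// "op'" = add; both halves must simplify for it to apply (illustrative):
//   %a = udiv exact i32 %x, %c
//   %n = xor i32 %x, -1
//   %b = udiv exact i32 %n, %c
//   %s = add i32 %a, %b
//   %m = mul i32 %s, %c   ; %a*%c --> %x, %b*%c --> %n, %x + %n --> -1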

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B) return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B) return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A) return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C) return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}
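
// Example of the first reassociation above (illustrative):
//   %a = and i32 %x, 4
//   %r = and i32 %a, 12   ; "B op C" folds to (4 & 12) = 4 = B, so %r --> %a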

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && isa<UndefValue>(TV))
    return FV;
  if (FV && isa<UndefValue>(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
///   %tmp = select i1 %cmp, i32 1, i32 2
///   %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose a new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = PI == LHS ? SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse)
                         : SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
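
// Example of threading a binop over a phi (illustrative; relies on the
// standard "X & -1 -> X" and "X & X -> X" and-folds):
//   %p = phi i8 [ -1, %bb0 ], [ %x, %bb1 ]
//   %r = and i8 %p, %x   ; both incomings evaluate to %x, so %r --> %x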

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the comparison on the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where incoming is actually "evaluated"
    // even though it is used later somewhere else.
    Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}
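
// Illustrative instances of the add rules above:
//   %s = add i1 %x, %x       ; --> 0 (i1 add behaves as xor, and X ^ X -> 0)
//   %t = add nuw i8 %y, -1   ; --> -1 (%y can only be 0)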

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
///
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                bool AllowNonInbounds = false) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
  APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());

  V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
  // As that strip may trace through `addrspacecast`, need to sext or trunc
  // the offset calculated.
  IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
  Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());

  Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
  if (VectorType *VecTy = dyn_cast<VectorType>(V->getType()))
    return ConstantVector::getSplat(VecTy->getElementCount(), OffsetIntPtr);
  return OffsetIntPtr;
}

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //   LHS - RHS
  //   = (LHSOffset + Base) - (RHSOffset + Base)
  //   = LHSOffset - RHSOffset
  return ConstantExpr::getSub(LHSOffset, RHSOffset);
}
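
// Illustrative use; this feeds the ptrtoint-difference fold in
// SimplifySubInst below:
//   %p1 = getelementptr inbounds i8, i8* %base, i64 16
//   %p2 = getelementptr inbounds i8, i8* %base, i64 4
//   %i1 = ptrtoint i8* %p1 to i64
//   %i2 = ptrtoint i8* %p2 to i64
//   %d = sub i64 %i1, %i2   ; --> 12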

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - undef -> undef
  // undef - X -> undef
  if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (isNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (isNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) &&
      match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * undef -> 0
  // X * 0 -> 0
  if (match(Op1, m_CombineOr(m_Undef(), m_Zero())))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  // i1 mul -> and.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
                             Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
                                         MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
                                      MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
}

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
  Type *Ty = Op0->getType();

  // X / undef -> undef
  // X % undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X / 0 -> undef
  // X % 0 -> undef
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return UndefValue::get(Ty);

  // If any element of a constant divisor vector is zero or undef, the whole op
  // is undef.
  auto *Op1C = dyn_cast<Constant>(Op1);
  if (Op1C && Ty->isVectorTy()) {
    unsigned NumElts = Ty->getVectorNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = Op1C->getAggregateElement(i);
      if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
        return UndefValue::get(Ty);
    }
  }

  // undef / X -> 0
  // undef % X -> 0
  if (match(Op0, m_Undef()))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If this is a boolean op (single-bit element type), we can't have
  // division-by-zero or remainder-by-zero, so assume the divisor is 1.
  // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
  Value *X;
  if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) ||
      (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  return nullptr;
}
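
// Boolean-divisor example (illustrative):
//   %d = zext i1 %b to i32
//   %q = udiv i32 %x, %d   ; %d is 0 or 1, and 0 would be immediate UB,
//                          ; so assume 1: %q --> %x (and urem --> 0)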

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // |X| / |Y| --> 0
    //
    // We require that one operand is a simple constant. That could be extended
    // to 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}
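
// For instance (illustrative): "sdiv i32 40, %y" simplifies to 0 whenever the
// query can prove %y <s -40 or %y >s 40, since then |40| < |%y|.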

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, true))
    return V;

  bool IsSigned = Opcode == Instruction::SDiv;

  // (X * Y) / Y -> X if the multiplication does not overflow.
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // If the Mul does not overflow, then we are good to go.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)))
      return X;
    // If X has the form X = A / Y, then X * Y cannot overflow.
    if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1)))))
      return X;
  }

  // (X rem Y) / Y -> 0
  if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Constant::getNullValue(Op0->getType());

  // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
  ConstantInt *C1, *C2;
  if (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
      match(Op1, m_ConstantInt(C2))) {
    bool Overflow;
    (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
    if (Overflow)
      return Constant::getNullValue(Op0->getType());
  }

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return Constant::getNullValue(Op0->getType());

  return nullptr;
}
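
// Overflowing divisor-product example (illustrative):
//   %a = udiv i32 %x, 1048576
//   %q = udiv i32 %a, 1048576   ; 1048576 * 1048576 overflows i32 --> %q is 0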

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, false))
    return V;

  // (X % Y) % Y -> X % Y
  if ((Opcode == Instruction::SRem &&
       match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (Opcode == Instruction::URem &&
       match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Op0;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo &&
      ((Opcode == Instruction::SRem &&
        match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
       (Opcode == Instruction::URem &&
        match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
    return Constant::getNullValue(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If X / Y == 0, then X % Y == X.
  if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
    return Op0;

  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If two operands are negated and no signed overflow, return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negated, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
}
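
// Negated-operand examples for the rules above (illustrative):
//   %n = sub nsw i32 0, %x
//   %q = sdiv i32 %x, %n   ; --> -1
//   %r = srem i32 %x, %n   ; --> 0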

/// Returns true if a shift by \c Amount always yields undef.
static bool isUndefShift(Value *Amount) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> undef because it may shift by the bitwidth.
  if (isa<UndefValue>(C))
    return true;

  // Shifting by the bitwidth or more is undefined.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
    if (CI->getValue().getLimitedValue() >=
        CI->getType()->getScalarSizeInBits())
      return true;

  // If all lanes of a vector shift are undefined the whole shift is.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
      if (!isUndefShift(C->getAggregateElement(I)))
        return false;
    return true;
  }

  return false;
}

/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X shift by 0 -> X
  // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
  // would be poison.
  Value *X;
  if (match(Op1, m_Zero()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return Op0;

  // Fold undefined shifts.
  if (isUndefShift(Op1))
    return UndefValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (Known.One.getLimitedValue() >= Known.getBitWidth())
    return UndefValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
  if (Known.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  return nullptr;
}
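
// Known-bits shift-amount example (illustrative):
//   %amt = and i32 %y, 32   ; the low 5 bits of %amt are known zero
//   %r = shl i32 %x, %amt   ; the only defined amount is 0 --> %r is %x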

/// Given operands for an LShr or AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool isExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (match(Op0, m_Undef()))
    return isExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  if (isExact) {
    KnownBits Op0Known =
        computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
    return V;

  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (match(Op0, m_Undef()))
    return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());

  // (X >> A) << A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo &&
      match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;

  // shl nuw i8 C, %x -> C iff C has sign bit set.
  if (isNUW && match(Op0, m_Negative()))
    return Op0;
  // NOTE: could use computeKnownBits() / LazyValueInfo,
  // but the cost-benefit analysis suggests it isn't worth it.

  return nullptr;
}

Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}
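
// Exact-shift low-bit example for SimplifyRightShift above (illustrative):
//   %o = or i32 %x, 1            ; low bit known set
//   %r = lshr exact i32 %o, %n   ; shifting out a set low bit would violate
//                                ; 'exact', so %n must be 0 --> %r is %o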

/// Given operands for an LShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // (X << A) >> A -> X
  Value *X;
  if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
  // We can return X as we do in the above case since OR alters no bits in X.
  // SimplifyDemandedBits in InstCombine can do more general optimization for
  // bit manipulation. This pattern aims to provide opportunities for other
  // optimizers by supporting a simple but common case in InstSimplify.
  Value *Y;
  const APInt *ShRAmt, *ShLAmt;
  if (match(Op1, m_APInt(ShRAmt)) &&
      match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
      *ShRAmt == *ShLAmt) {
    const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    const unsigned Width = Op0->getType()->getScalarSizeInBits();
    const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
    if (ShRAmt->uge(EffWidthY))
      return X;
  }

  return nullptr;
}

Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                              const SimplifyQuery &Q) {
  return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}

/// Given operands for an AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // all ones >>a X -> -1
  // Do not return Op0 because it may contain undef elements if it's a vector.
  if (match(Op0, m_AllOnes()))
    return Constant::getAllOnesValue(Op0->getType());

  // (X << A) >> A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return Op0;

  return nullptr;
}

Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                              const SimplifyQuery &Q) {
  return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}
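
// Shift-and-mask example for SimplifyLShrInst's "(X << A) | Y" fold above
// (illustrative):
//   %s = shl nuw i32 %x, 16
//   %y = and i32 %z, 65535   ; no bits above bit 15
//   %o = or i32 %s, %y
//   %r = lshr i32 %o, 16     ; --> %x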

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q) {
  Value *X, *Y;

  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  ICmpInst::Predicate UnsignedPred;

  Value *A, *B;
  // Y = (A - B);
  if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
    if (match(UnsignedICmp,
              m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
        ICmpInst::isUnsigned(UnsignedPred)) {
      // A >=/<= B || (A - B) != 0 <--> true
      if ((UnsignedPred == ICmpInst::ICMP_UGE ||
           UnsignedPred == ICmpInst::ICMP_ULE) &&
          EqPred == ICmpInst::ICMP_NE && !IsAnd)
        return ConstantInt::getTrue(UnsignedICmp->getType());
      // A </> B && (A - B) == 0 <--> false
      if ((UnsignedPred == ICmpInst::ICMP_ULT ||
           UnsignedPred == ICmpInst::ICMP_UGT) &&
          EqPred == ICmpInst::ICMP_EQ && IsAnd)
        return ConstantInt::getFalse(UnsignedICmp->getType());

      // A </> B && (A - B) != 0 <--> A </> B
      // A </> B || (A - B) != 0 <--> (A - B) != 0
      if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
                                          UnsignedPred == ICmpInst::ICMP_UGT))
        return IsAnd ? UnsignedICmp : ZeroICmp;

      // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
      // A <=/>= B || (A - B) == 0 <--> A <=/>= B
      if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
                                          UnsignedPred == ICmpInst::ICMP_UGE))
        return IsAnd ? ZeroICmp : UnsignedICmp;
    }

    // Given Y = (A - B)
    //   Y >= A && Y != 0 --> Y >= A iff B != 0
    //   Y <  A || Y == 0 --> Y <  A iff B != 0
    if (match(UnsignedICmp,
              m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
      if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
          EqPred == ICmpInst::ICMP_NE &&
          isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
        return UnsignedICmp;
      if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
          EqPred == ICmpInst::ICMP_EQ &&
          isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
        return UnsignedICmp;
    }
  }

  if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
      ICmpInst::isUnsigned(UnsignedPred))
    ;
  else if (match(UnsignedICmp,
                 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
           ICmpInst::isUnsigned(UnsignedPred))
    UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
  else
    return nullptr;

  // X < Y && Y != 0 --> X < Y
  // X < Y || Y != 0 --> Y != 0
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
    return IsAnd ? UnsignedICmp : ZeroICmp;

  // X <= Y && Y != 0 --> X <= Y iff X != 0
  // X <= Y || Y != 0 --> Y != 0 iff X != 0
  if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
      isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
    return IsAnd ? UnsignedICmp : ZeroICmp;

  // X >= Y && Y == 0 --> Y == 0
  // X >= Y || Y == 0 --> X >= Y
  if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
    return IsAnd ? ZeroICmp : UnsignedICmp;

  // X > Y && Y == 0 --> Y == 0 iff X != 0
  // X > Y || Y == 0 --> X > Y iff X != 0
  if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
      isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
    return IsAnd ? ZeroICmp : UnsignedICmp;

  // X < Y && Y == 0 --> false
  if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
      IsAnd)
    return getFalse(UnsignedICmp->getType());

  // X >= Y || Y != 0 --> true
  if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
      !IsAnd)
    return getTrue(UnsignedICmp->getType());

  return nullptr;
}
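
// Representative fold from the rules above (illustrative):
//   %c1 = icmp ult i32 %x, %y
//   %c2 = icmp ne i32 %y, 0
//   %r = and i1 %c1, %c2   ; %x <u %y already implies %y != 0 --> %r is %c1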
ZeroICmp : UnsignedICmp; 1453 } 1454 1455 // Given Y = (A - B) 1456 // Y >= A && Y != 0 --> Y >= A iff B != 0 1457 // Y < A || Y == 0 --> Y < A iff B != 0 1458 if (match(UnsignedICmp, 1459 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) { 1460 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd && 1461 EqPred == ICmpInst::ICMP_NE && 1462 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1463 return UnsignedICmp; 1464 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd && 1465 EqPred == ICmpInst::ICMP_EQ && 1466 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1467 return UnsignedICmp; 1468 } 1469 } 1470 1471 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) && 1472 ICmpInst::isUnsigned(UnsignedPred)) 1473 ; 1474 else if (match(UnsignedICmp, 1475 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) && 1476 ICmpInst::isUnsigned(UnsignedPred)) 1477 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred); 1478 else 1479 return nullptr; 1480 1481 // X < Y && Y != 0 --> X < Y 1482 // X < Y || Y != 0 --> Y != 0 1483 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE) 1484 return IsAnd ? UnsignedICmp : ZeroICmp; 1485 1486 // X <= Y && Y != 0 --> X <= Y iff X != 0 1487 // X <= Y || Y != 0 --> Y != 0 iff X != 0 1488 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE && 1489 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1490 return IsAnd ? UnsignedICmp : ZeroICmp; 1491 1492 // X >= Y && Y == 0 --> Y == 0 1493 // X >= Y || Y == 0 --> X >= Y 1494 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ) 1495 return IsAnd ? ZeroICmp : UnsignedICmp; 1496 1497 // X > Y && Y == 0 --> Y == 0 iff X != 0 1498 // X > Y || Y == 0 --> X > Y iff X != 0 1499 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ && 1500 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1501 return IsAnd ? ZeroICmp : UnsignedICmp; 1502 1503 // X < Y && Y == 0 --> false 1504 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ && 1505 IsAnd) 1506 return getFalse(UnsignedICmp->getType()); 1507 1508 // X >= Y || Y != 0 --> true 1509 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE && 1510 !IsAnd) 1511 return getTrue(UnsignedICmp->getType()); 1512 1513 return nullptr; 1514 } 1515 1516 /// Commuted variants are assumed to be handled by calling this function again 1517 /// with the parameters swapped. 1518 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { 1519 ICmpInst::Predicate Pred0, Pred1; 1520 Value *A ,*B; 1521 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || 1522 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) 1523 return nullptr; 1524 1525 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B). 1526 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we 1527 // can eliminate Op1 from this 'and'. 1528 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) 1529 return Op0; 1530 1531 // Check for any combination of predicates that are guaranteed to be disjoint. 
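// For example (illustrative): (icmp eq A, B) & (icmp ult A, B) --> false,
// since A cannot be both equal to and unsigned-less-than B.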
1532 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || 1533 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) || 1534 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) || 1535 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)) 1536 return getFalse(Op0->getType()); 1537 1538 return nullptr; 1539 } 1540 1541 /// Commuted variants are assumed to be handled by calling this function again 1542 /// with the parameters swapped. 1543 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { 1544 ICmpInst::Predicate Pred0, Pred1; 1545 Value *A ,*B; 1546 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || 1547 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) 1548 return nullptr; 1549 1550 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B). 1551 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we 1552 // can eliminate Op0 from this 'or'. 1553 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) 1554 return Op1; 1555 1556 // Check for any combination of predicates that cover the entire range of 1557 // possibilities. 1558 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || 1559 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) || 1560 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) || 1561 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE)) 1562 return getTrue(Op0->getType()); 1563 1564 return nullptr; 1565 } 1566 1567 /// Test if a pair of compares with a shared operand and 2 constants has an 1568 /// empty set intersection, full set union, or if one compare is a superset of 1569 /// the other. 1570 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, 1571 bool IsAnd) { 1572 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)). 1573 if (Cmp0->getOperand(0) != Cmp1->getOperand(0)) 1574 return nullptr; 1575 1576 const APInt *C0, *C1; 1577 if (!match(Cmp0->getOperand(1), m_APInt(C0)) || 1578 !match(Cmp1->getOperand(1), m_APInt(C1))) 1579 return nullptr; 1580 1581 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0); 1582 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1); 1583 1584 // For and-of-compares, check if the intersection is empty: 1585 // (icmp X, C0) && (icmp X, C1) --> empty set --> false 1586 if (IsAnd && Range0.intersectWith(Range1).isEmptySet()) 1587 return getFalse(Cmp0->getType()); 1588 1589 // For or-of-compares, check if the union is full: 1590 // (icmp X, C0) || (icmp X, C1) --> full set --> true 1591 if (!IsAnd && Range0.unionWith(Range1).isFullSet()) 1592 return getTrue(Cmp0->getType()); 1593 1594 // Is one range a superset of the other? 1595 // If this is and-of-compares, take the smaller set: 1596 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42 1597 // If this is or-of-compares, take the larger set: 1598 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4 1599 if (Range0.contains(Range1)) 1600 return IsAnd ? Cmp1 : Cmp0; 1601 if (Range1.contains(Range0)) 1602 return IsAnd ? 
Cmp0 : Cmp1; 1603 1604 return nullptr; 1605 } 1606 1607 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1, 1608 bool IsAnd) { 1609 ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate(); 1610 if (!match(Cmp0->getOperand(1), m_Zero()) || 1611 !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1) 1612 return nullptr; 1613 1614 if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ)) 1615 return nullptr; 1616 1617 // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)". 1618 Value *X = Cmp0->getOperand(0); 1619 Value *Y = Cmp1->getOperand(0); 1620 1621 // If one of the compares is a masked version of a (not) null check, then 1622 // that compare implies the other, so we eliminate the other. Optionally, look 1623 // through a pointer-to-int cast to match a null check of a pointer type. 1624 1625 // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0 1626 // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0 1627 // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0 1628 // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0 1629 if (match(Y, m_c_And(m_Specific(X), m_Value())) || 1630 match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value()))) 1631 return Cmp1; 1632 1633 // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0 1634 // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0 1635 // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0 1636 // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0 1637 if (match(X, m_c_And(m_Specific(Y), m_Value())) || 1638 match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value()))) 1639 return Cmp0; 1640 1641 return nullptr; 1642 } 1643 1644 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, 1645 const InstrInfoQuery &IIQ) { 1646 // (icmp (add V, C0), C1) & (icmp V, C0) 1647 ICmpInst::Predicate Pred0, Pred1; 1648 const APInt *C0, *C1; 1649 Value *V; 1650 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1651 return nullptr; 1652 1653 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1654 return nullptr; 1655 1656 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0)); 1657 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1658 return nullptr; 1659 1660 Type *ITy = Op0->getType(); 1661 bool isNSW = IIQ.hasNoSignedWrap(AddInst); 1662 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); 1663 1664 const APInt Delta = *C1 - *C0; 1665 if (C0->isStrictlyPositive()) { 1666 if (Delta == 2) { 1667 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT) 1668 return getFalse(ITy); 1669 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1670 return getFalse(ITy); 1671 } 1672 if (Delta == 1) { 1673 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT) 1674 return getFalse(ITy); 1675 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1676 return getFalse(ITy); 1677 } 1678 } 1679 if (C0->getBoolValue() && isNUW) { 1680 if (Delta == 2) 1681 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT) 1682 return getFalse(ITy); 1683 if (Delta == 1) 1684 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT) 1685 return getFalse(ITy); 1686 } 1687 1688 return nullptr; 1689 } 1690 1691 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, 1692 const SimplifyQuery &Q) { 1693 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q)) 1694 return X; 1695 
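// simplifyUnsignedRangeCheck checks one operand order at a time, so also try
// the commuted pairing.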
if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q)) 1696 return X; 1697 1698 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1)) 1699 return X; 1700 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0)) 1701 return X; 1702 1703 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true)) 1704 return X; 1705 1706 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true)) 1707 return X; 1708 1709 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ)) 1710 return X; 1711 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ)) 1712 return X; 1713 1714 return nullptr; 1715 } 1716 1717 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, 1718 const InstrInfoQuery &IIQ) { 1719 // (icmp (add V, C0), C1) | (icmp V, C0) 1720 ICmpInst::Predicate Pred0, Pred1; 1721 const APInt *C0, *C1; 1722 Value *V; 1723 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1724 return nullptr; 1725 1726 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1727 return nullptr; 1728 1729 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); 1730 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1731 return nullptr; 1732 1733 Type *ITy = Op0->getType(); 1734 bool isNSW = IIQ.hasNoSignedWrap(AddInst); 1735 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); 1736 1737 const APInt Delta = *C1 - *C0; 1738 if (C0->isStrictlyPositive()) { 1739 if (Delta == 2) { 1740 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE) 1741 return getTrue(ITy); 1742 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1743 return getTrue(ITy); 1744 } 1745 if (Delta == 1) { 1746 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE) 1747 return getTrue(ITy); 1748 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1749 return getTrue(ITy); 1750 } 1751 } 1752 if (C0->getBoolValue() && isNUW) { 1753 if (Delta == 2) 1754 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE) 1755 return getTrue(ITy); 1756 if (Delta == 1) 1757 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE) 1758 return getTrue(ITy); 1759 } 1760 1761 return nullptr; 1762 } 1763 1764 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, 1765 const SimplifyQuery &Q) { 1766 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q)) 1767 return X; 1768 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q)) 1769 return X; 1770 1771 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1)) 1772 return X; 1773 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0)) 1774 return X; 1775 1776 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false)) 1777 return X; 1778 1779 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false)) 1780 return X; 1781 1782 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ)) 1783 return X; 1784 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ)) 1785 return X; 1786 1787 return nullptr; 1788 } 1789 1790 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI, 1791 FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) { 1792 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1); 1793 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1); 1794 if (LHS0->getType() != RHS0->getType()) 1795 return nullptr; 1796 1797 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 1798 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) || 1799 (PredL == FCmpInst::FCMP_UNO && 
PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
    // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
    // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
    // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
    // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
    // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
    // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
    // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
    // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
    if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
        (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1)))
      return RHS;

    // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
    // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
    // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
    // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
    // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
    // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
    // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
    // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
    if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
        (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1)))
      return LHS;
  }

  return nullptr;
}

static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q,
                                  Value *Op0, Value *Op1, bool IsAnd) {
  // Look through casts of the 'and'/'or' operands to find compares.
  auto *Cast0 = dyn_cast<CastInst>(Op0);
  auto *Cast1 = dyn_cast<CastInst>(Op1);
  if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
      Cast0->getSrcTy() == Cast1->getSrcTy()) {
    Op0 = Cast0->getOperand(0);
    Op1 = Cast1->getOperand(0);
  }

  Value *V = nullptr;
  auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
  auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
  if (ICmp0 && ICmp1)
    V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
              : simplifyOrOfICmps(ICmp0, ICmp1, Q);

  auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
  auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
  if (FCmp0 && FCmp1)
    V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd);

  if (!V)
    return nullptr;
  if (!Cast0)
    return V;

  // If we looked through casts, we can only handle a constant simplification
  // because we are not allowed to create a cast instruction here.
  if (auto *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());

  return nullptr;
}

/// Check that Op1 is in the expected form, i.e.:
/// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
/// %Op1 = extractvalue { i4, i1 } %Agg, 1
static bool omitCheckForZeroBeforeMulWithOverflowInternal(Value *Op1,
                                                          Value *X) {
  auto *Extract = dyn_cast<ExtractValueInst>(Op1);
  // We should only be extracting the overflow bit.
  if (!Extract || !Extract->getIndices().equals(1))
    return false;
  Value *Agg = Extract->getAggregateOperand();
  // This should be a multiplication-with-overflow intrinsic.
  if (!match(Agg, m_CombineOr(m_Intrinsic<Intrinsic::umul_with_overflow>(),
                              m_Intrinsic<Intrinsic::smul_with_overflow>())))
    return false;
  // One of its multipliers should be the value we checked for zero before.
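  // That is, accept either @llvm.[us]mul.with.overflow(%X, %???) or
  // @llvm.[us]mul.with.overflow(%???, %X).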
  if (!match(Agg, m_CombineOr(m_Argument<0>(m_Specific(X)),
                              m_Argument<1>(m_Specific(X)))))
    return false;
  return true;
}

/// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
/// other form of check, e.g. one that was using division; it may have been
/// guarded against division-by-zero. We can drop that check now.
/// Look for:
/// %Op0 = icmp ne i4 %X, 0
/// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
/// %Op1 = extractvalue { i4, i1 } %Agg, 1
/// %??? = and i1 %Op0, %Op1
/// We can just return %Op1
static Value *omitCheckForZeroBeforeMulWithOverflow(Value *Op0, Value *Op1) {
  ICmpInst::Predicate Pred;
  Value *X;
  if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
      Pred != ICmpInst::Predicate::ICMP_NE)
    return nullptr;
  // Is Op1 in expected form?
  if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
    return nullptr;
  // Can omit 'and', and just return the overflow bit.
  return Op1;
}

/// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
/// other form of check, e.g. one that was using division; it may have been
/// guarded against division-by-zero. We can drop that check now.
/// Look for:
/// %Op0 = icmp eq i4 %X, 0
/// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
/// %Op1 = extractvalue { i4, i1 } %Agg, 1
/// %NotOp1 = xor i1 %Op1, true
/// %or = or i1 %Op0, %NotOp1
/// We can just return %NotOp1
static Value *omitCheckForZeroBeforeInvertedMulWithOverflow(Value *Op0,
                                                            Value *NotOp1) {
  ICmpInst::Predicate Pred;
  Value *X;
  if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
      Pred != ICmpInst::Predicate::ICMP_EQ)
    return nullptr;
  // We expect the other operand of the 'or' to be a 'not'.
  Value *Op1;
  if (!match(NotOp1, m_Not(m_Value(Op1))))
    return nullptr;
  // Is Op1 in expected form?
  if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
    return nullptr;
  // Can omit the 'or', and just return the inverted overflow bit.
  return NotOp1;
}

/// Given operands for an And, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
    return C;

  // X & undef -> 0
  if (match(Op1, m_Undef()))
    return Constant::getNullValue(Op0->getType());

  // X & X = X
  if (Op0 == Op1)
    return Op0;

  // X & 0 = 0
  if (match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X & -1 = X
  if (match(Op1, m_AllOnes()))
    return Op0;

  // A & ~A = ~A & A = 0
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getNullValue(Op0->getType());

  // (A | ?) & A = A
  if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
    return Op1;

  // A & (A | ?) = A
  if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
    return Op0;

  // A mask that only clears known zeros of a shifted value is a no-op.
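  // For example (illustrative): "and (shl i8 %x, 3), -8" simplifies to
  // "shl i8 %x, 3": the only bits the mask clears are the low three, and the
  // shl already left those zero.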
1972 Value *X; 1973 const APInt *Mask; 1974 const APInt *ShAmt; 1975 if (match(Op1, m_APInt(Mask))) { 1976 // If all bits in the inverted and shifted mask are clear: 1977 // and (shl X, ShAmt), Mask --> shl X, ShAmt 1978 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) && 1979 (~(*Mask)).lshr(*ShAmt).isNullValue()) 1980 return Op0; 1981 1982 // If all bits in the inverted and shifted mask are clear: 1983 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt 1984 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) && 1985 (~(*Mask)).shl(*ShAmt).isNullValue()) 1986 return Op0; 1987 } 1988 1989 // If we have a multiplication overflow check that is being 'and'ed with a 1990 // check that one of the multipliers is not zero, we can omit the 'and', and 1991 // only keep the overflow check. 1992 if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op0, Op1)) 1993 return V; 1994 if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op1, Op0)) 1995 return V; 1996 1997 // A & (-A) = A if A is a power of two or zero. 1998 if (match(Op0, m_Neg(m_Specific(Op1))) || 1999 match(Op1, m_Neg(m_Specific(Op0)))) { 2000 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 2001 Q.DT)) 2002 return Op0; 2003 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 2004 Q.DT)) 2005 return Op1; 2006 } 2007 2008 // This is a similar pattern used for checking if a value is a power-of-2: 2009 // (A - 1) & A --> 0 (if A is a power-of-2 or 0) 2010 // A & (A - 1) --> 0 (if A is a power-of-2 or 0) 2011 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) && 2012 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) 2013 return Constant::getNullValue(Op1->getType()); 2014 if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) && 2015 isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) 2016 return Constant::getNullValue(Op0->getType()); 2017 2018 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true)) 2019 return V; 2020 2021 // Try some generic simplifications for associative operations. 2022 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, 2023 MaxRecurse)) 2024 return V; 2025 2026 // And distributes over Or. Try some generic simplifications based on this. 2027 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or, 2028 Q, MaxRecurse)) 2029 return V; 2030 2031 // And distributes over Xor. Try some generic simplifications based on this. 2032 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor, 2033 Q, MaxRecurse)) 2034 return V; 2035 2036 // If the operation is with the result of a select instruction, check whether 2037 // operating on either branch of the select always yields the same value. 2038 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 2039 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q, 2040 MaxRecurse)) 2041 return V; 2042 2043 // If the operation is with the result of a phi instruction, check whether 2044 // operating on all incoming values of the phi always yields the same value. 2045 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 2046 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q, 2047 MaxRecurse)) 2048 return V; 2049 2050 // Assuming the effective width of Y is not larger than A, i.e. all bits 2051 // from X and Y are disjoint in (X << A) | Y, 2052 // if the mask of this AND op covers all bits of X or Y, while it covers 2053 // no bits from the other, we can bypass this AND op. 
E.g.,
  // ((X << A) | Y) & Mask -> Y,
  // if Mask = ((1 << effective_width_of(Y)) - 1)
  // ((X << A) | Y) & Mask -> X << A,
  // if Mask = ((1 << effective_width_of(X)) - 1) << A
  // SimplifyDemandedBits in InstCombine can optimize the general case.
  // This pattern aims to help other passes for a common case.
  Value *Y, *XShifted;
  if (match(Op1, m_APInt(Mask)) &&
      match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
                                     m_Value(XShifted)),
                        m_Value(Y)))) {
    const unsigned Width = Op0->getType()->getScalarSizeInBits();
    const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
    const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
    if (EffWidthY <= ShftCnt) {
      const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
                                                Q.DT);
      const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros();
      const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
      const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
      // If the mask is extracting all bits from X or Y as is, we can skip
      // this AND op.
      if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
        return Y;
      if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
        return XShifted;
    }
  }

  return nullptr;
}

Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for an Or, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
    return C;

  // X | undef -> -1
  // X | -1 = -1
  // Do not return Op1 because it may contain undef elements if it's a vector.
  if (match(Op1, m_Undef()) || match(Op1, m_AllOnes()))
    return Constant::getAllOnesValue(Op0->getType());

  // X | X = X
  // X | 0 = X
  if (Op0 == Op1 || match(Op1, m_Zero()))
    return Op0;

  // A | ~A = ~A | A = -1
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Op0->getType());

  // (A & ?) | A = A
  if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
    return Op1;

  // A | (A & ?) = A
  if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
    return Op0;

  // ~(A & ?) | A = -1
  if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
    return Constant::getAllOnesValue(Op1->getType());

  // A | ~(A & ?) = -1
  if (match(Op1, m_Not(m_c_And(m_Specific(Op0), m_Value()))))
    return Constant::getAllOnesValue(Op0->getType());

  Value *A, *B;
  // (A & ~B) | (A ^ B) -> (A ^ B)
  // (~B & A) | (A ^ B) -> (A ^ B)
  // (A & ~B) | (B ^ A) -> (B ^ A)
  // (~B & A) | (B ^ A) -> (B ^ A)
  if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
      (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
       match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
    return Op1;

  // Commute the 'or' operands.
  // (A ^ B) | (A & ~B) -> (A ^ B)
  // (A ^ B) | (~B & A) -> (A ^ B)
  // (B ^ A) | (A & ~B) -> (B ^ A)
  // (B ^ A) | (~B & A) -> (B ^ A)
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
      (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
       match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
    return Op0;

  // (A & B) | (~A ^ B) -> (~A ^ B)
  // (B & A) | (~A ^ B) -> (~A ^ B)
  // (A & B) | (B ^ ~A) -> (B ^ ~A)
  // (B & A) | (B ^ ~A) -> (B ^ ~A)
  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
      (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
       match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
    return Op1;

  // (~A ^ B) | (A & B) -> (~A ^ B)
  // (~A ^ B) | (B & A) -> (~A ^ B)
  // (B ^ ~A) | (A & B) -> (B ^ ~A)
  // (B ^ ~A) | (B & A) -> (B ^ ~A)
  if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
      (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
       match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
    return Op0;

  if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
    return V;

  // If we have a multiplication overflow check that is being 'or'ed with a
  // check that one of the multipliers is not zero, we can omit the 'or', and
  // only keep the overflow check.
  if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op0, Op1))
    return V;
  if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op1, Op0))
    return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Or distributes over And. Try some generic simplifications based on this.
  if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
                             MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
                                         MaxRecurse))
      return V;

  // (A & C1)|(B & C2)
  const APInt *C1, *C2;
  if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
      match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
    if (*C1 == ~*C2) {
      // (A & C1)|(B & C2)
      // If we have: ((V + N) & C1) | (V & C2)
      // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
      // replace with V+N.
      Value *N;
      if (C2->isMask() && // C2 == 0+1+
          match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
        // Add commutes, try both ways.
        if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
          return A;
      }
      // Or commutes, try both ways.
      if (C1->isMask() &&
          match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
        // Add commutes, try both ways.
        if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
          return B;
      }
    }
  }

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
2224 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 2225 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse)) 2226 return V; 2227 2228 return nullptr; 2229 } 2230 2231 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 2232 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit); 2233 } 2234 2235 /// Given operands for a Xor, see if we can fold the result. 2236 /// If not, this returns null. 2237 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 2238 unsigned MaxRecurse) { 2239 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q)) 2240 return C; 2241 2242 // A ^ undef -> undef 2243 if (match(Op1, m_Undef())) 2244 return Op1; 2245 2246 // A ^ 0 = A 2247 if (match(Op1, m_Zero())) 2248 return Op0; 2249 2250 // A ^ A = 0 2251 if (Op0 == Op1) 2252 return Constant::getNullValue(Op0->getType()); 2253 2254 // A ^ ~A = ~A ^ A = -1 2255 if (match(Op0, m_Not(m_Specific(Op1))) || 2256 match(Op1, m_Not(m_Specific(Op0)))) 2257 return Constant::getAllOnesValue(Op0->getType()); 2258 2259 // Try some generic simplifications for associative operations. 2260 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, 2261 MaxRecurse)) 2262 return V; 2263 2264 // Threading Xor over selects and phi nodes is pointless, so don't bother. 2265 // Threading over the select in "A ^ select(cond, B, C)" means evaluating 2266 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and 2267 // only if B and C are equal. If B and C are equal then (since we assume 2268 // that operands have already been simplified) "select(cond, B, C)" should 2269 // have been simplified to the common value of B and C already. Analysing 2270 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly 2271 // for threading over phi nodes. 2272 2273 return nullptr; 2274 } 2275 2276 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 2277 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit); 2278 } 2279 2280 2281 static Type *GetCompareTy(Value *Op) { 2282 return CmpInst::makeCmpResultType(Op->getType()); 2283 } 2284 2285 /// Rummage around inside V looking for something equivalent to the comparison 2286 /// "LHS Pred RHS". Return such a value if found, otherwise return null. 2287 /// Helper function for analyzing max/min idioms. 2288 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, 2289 Value *LHS, Value *RHS) { 2290 SelectInst *SI = dyn_cast<SelectInst>(V); 2291 if (!SI) 2292 return nullptr; 2293 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 2294 if (!Cmp) 2295 return nullptr; 2296 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1); 2297 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS) 2298 return Cmp; 2299 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) && 2300 LHS == CmpRHS && RHS == CmpLHS) 2301 return Cmp; 2302 return nullptr; 2303 } 2304 2305 // A significant optimization not implemented here is assuming that alloca 2306 // addresses are not equal to incoming argument values. They don't *alias*, 2307 // as we say, but that doesn't mean they aren't equal, so we take a 2308 // conservative approach. 2309 // 2310 // This is inspired in part by C++11 5.10p1: 2311 // "Two pointers of the same type compare equal if and only if they are both 2312 // null, both point to the same function, or both represent the same 2313 // address." 2314 // 2315 // This is pretty permissive. 
//
// It's also partly due to C11 6.5.9p6:
// "Two pointers compare equal if and only if both are null pointers, both are
// pointers to the same object (including a pointer to an object and a
// subobject at its beginning) or function, both are pointers to one past the
// last element of the same array object, or one is a pointer to one past the
// end of one array object and the other is a pointer to the start of a
// different array object that happens to immediately follow the first array
// object in the address space."
//
// C11's version is more restrictive, however there's no reason why an argument
// couldn't be a one-past-the-end value for a stack object in the caller and be
// equal to the beginning of a stack object in the callee.
//
// If the C and C++ standards are ever made sufficiently restrictive in this
// area, it may be possible to update LLVM's semantics accordingly and
// reinstate this optimization.
static Constant *
computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
                   const DominatorTree *DT, CmpInst::Predicate Pred,
                   AssumptionCache *AC, const Instruction *CxtI,
                   const InstrInfoQuery &IIQ, Value *LHS, Value *RHS) {
  // First, skip past any trivial no-ops.
  LHS = LHS->stripPointerCasts();
  RHS = RHS->stripPointerCasts();

  // A non-null pointer is not equal to a null pointer.
  if (llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
                           IIQ.UseInstrInfo) &&
      isa<ConstantPointerNull>(RHS) &&
      (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
    return ConstantInt::get(GetCompareTy(LHS),
                            !CmpInst::isTrueWhenEqual(Pred));

  // We can only fold certain predicates on pointer comparisons.
  switch (Pred) {
  default:
    return nullptr;

  // Equality comparisons are easy to fold.
  case CmpInst::ICMP_EQ:
  case CmpInst::ICMP_NE:
    break;

  // We can only handle unsigned relational comparisons because 'inbounds' on
  // a GEP only protects against unsigned wrapping.
  case CmpInst::ICMP_UGT:
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    // However, we have to switch them to their signed variants to handle
    // negative indices from the base pointer.
    Pred = ICmpInst::getSignedPredicate(Pred);
    break;
  }

  // Strip off any constant offsets so that we can reason about them.
  // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
  // here and compare base addresses like AliasAnalysis does, however there are
  // numerous hazards. AliasAnalysis and its utilities rely on special rules
  // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
  // doesn't need to guarantee pointer inequality when it says NoAlias.
  Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are related via constant offsets to the same base
  // value, we can replace it with an icmp which just compares the offsets.
  if (LHS == RHS)
    return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);

  // Various optimizations for (in)equality comparisons.
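  // For example (illustrative): an icmp eq between a live alloca's address
  // and a global's address can fold to false below, since the two objects
  // cannot overlap while both exist.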
  if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
    // Different non-empty allocations that exist at the same time have
    // different addresses (if the program can tell). Global variables always
    // exist, so they always exist during the lifetime of each other and all
    // allocas. Two different allocas usually have different addresses...
    //
    // However, if there's an @llvm.stackrestore dynamically in between two
    // allocas, they may have the same address. It's tempting to reduce the
    // scope of the problem by only looking at *static* allocas here. That
    // would cover the majority of allocas while significantly reducing the
    // likelihood of having an @llvm.stackrestore pop up in the middle.
    // However, it's not actually impossible for an @llvm.stackrestore to pop
    // up in the middle of an entry block. Also, if we have a block that's not
    // attached to a function, we can't tell if it's "static" under the current
    // definition. Theoretically, this problem could be fixed by creating a new
    // instruction kind specifically for static allocas. Such a new instruction
    // could be required to be at the top of the entry block, thus preventing
    // it from being subject to a @llvm.stackrestore. Instcombine could even
    // convert regular allocas into these special allocas. It'd be nifty.
    // However, until then, this problem remains open.
    //
    // So, we'll assume that two non-empty allocas have different addresses
    // for now.
    //
    // With all that, if the offsets are within the bounds of their allocations
    // (and not one-past-the-end! so we can't use inbounds!), and their
    // allocations aren't the same, the pointers are not equal.
    //
    // Note that it's not necessary to check for LHS being a global variable
    // address, due to canonicalization and constant folding.
    if (isa<AllocaInst>(LHS) &&
        (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
      ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
      ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
      uint64_t LHSSize, RHSSize;
      ObjectSizeOpts Opts;
      Opts.NullIsUnknownSize =
          NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
      if (LHSOffsetCI && RHSOffsetCI &&
          getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
          getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
        const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
        const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
        if (!LHSOffsetValue.isNegative() &&
            !RHSOffsetValue.isNegative() &&
            LHSOffsetValue.ult(LHSSize) &&
            RHSOffsetValue.ult(RHSSize)) {
          return ConstantInt::get(GetCompareTy(LHS),
                                  !CmpInst::isTrueWhenEqual(Pred));
        }
      }

      // Repeat the above check but this time without depending on DataLayout
      // or being able to compute a precise size.
      if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
          !cast<PointerType>(RHS->getType())->isEmptyTy() &&
          LHSOffset->isNullValue() &&
          RHSOffset->isNullValue())
        return ConstantInt::get(GetCompareTy(LHS),
                                !CmpInst::isTrueWhenEqual(Pred));
    }

    // Even if a non-inbounds GEP occurs along the path we can still optimize
    // equality comparisons concerning the result. We avoid walking the whole
    // chain again by starting where the last calls to
    // stripAndComputeConstantOffsets left off and accumulate the offsets.
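    // (The 'true' argument lets the walk look through non-inbounds GEPs as
    // well; the two offset contributions are added together below before
    // comparing.)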
    Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
    Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
    if (LHS == RHS)
      return ConstantExpr::getICmp(Pred,
                                   ConstantExpr::getAdd(LHSOffset, LHSNoBound),
                                   ConstantExpr::getAdd(RHSOffset, RHSNoBound));

    // If one side of the equality comparison must come from a noalias call
    // (meaning a system memory allocation function), and the other side must
    // come from a pointer that cannot overlap with dynamically-allocated
    // memory within the lifetime of the current function (allocas, byval
    // arguments, globals), then determine the comparison result here.
    SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
    GetUnderlyingObjects(LHS, LHSUObjs, DL);
    GetUnderlyingObjects(RHS, RHSUObjs, DL);

    // Is the set of underlying objects all noalias calls?
    auto IsNAC = [](ArrayRef<const Value *> Objects) {
      return all_of(Objects, isNoAliasCall);
    };

    // Is the set of underlying objects all things which must be disjoint from
    // noalias calls? For allocas, we consider only static ones (dynamic
    // allocas might be transformed into calls to malloc not simultaneously
    // live with the compared-to allocation). For globals, we exclude symbols
    // that might be resolved lazily to symbols in another dynamically-loaded
    // library (and, thus, could be malloc'ed by the implementation).
    auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
      return all_of(Objects, [](const Value *V) {
        if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
          return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
        if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
          return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
                  GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
                 !GV->isThreadLocal();
        if (const Argument *A = dyn_cast<Argument>(V))
          return A->hasByValAttr();
        return false;
      });
    };

    if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
        (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
      return ConstantInt::get(GetCompareTy(LHS),
                              !CmpInst::isTrueWhenEqual(Pred));

    // Fold comparisons for a non-escaping pointer even if the allocation call
    // cannot be elided. We cannot fold malloc comparison to null. Also, the
    // dynamic allocation call could be either of the operands.
    Value *MI = nullptr;
    if (isAllocLikeFn(LHS, TLI) &&
        llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
      MI = LHS;
    else if (isAllocLikeFn(RHS, TLI) &&
             llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
      MI = RHS;
    // FIXME: We should also fold the compare when the pointer escapes, but the
    // compare dominates the pointer escape.
    if (MI && !PointerMayBeCaptured(MI, true, true))
      return ConstantInt::get(GetCompareTy(LHS),
                              CmpInst::isFalseWhenEqual(Pred));
  }

  // Otherwise, fail.
  return nullptr;
}

/// Fold an icmp when its operands have i1 scalar type.
static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q) {
  Type *ITy = GetCompareTy(LHS); // The return type.
  Type *OpTy = LHS->getType();   // The operand type.
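  // Note: as an i1, "true" is 1 for the unsigned predicates but -1 for the
  // signed ones, which is why e.g. "X <s 0" below simplifies to X itself.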
2525 if (!OpTy->isIntOrIntVectorTy(1)) 2526 return nullptr; 2527 2528 // A boolean compared to true/false can be simplified in 14 out of the 20 2529 // (10 predicates * 2 constants) possible combinations. Cases not handled here 2530 // require a 'not' of the LHS, so those must be transformed in InstCombine. 2531 if (match(RHS, m_Zero())) { 2532 switch (Pred) { 2533 case CmpInst::ICMP_NE: // X != 0 -> X 2534 case CmpInst::ICMP_UGT: // X >u 0 -> X 2535 case CmpInst::ICMP_SLT: // X <s 0 -> X 2536 return LHS; 2537 2538 case CmpInst::ICMP_ULT: // X <u 0 -> false 2539 case CmpInst::ICMP_SGT: // X >s 0 -> false 2540 return getFalse(ITy); 2541 2542 case CmpInst::ICMP_UGE: // X >=u 0 -> true 2543 case CmpInst::ICMP_SLE: // X <=s 0 -> true 2544 return getTrue(ITy); 2545 2546 default: break; 2547 } 2548 } else if (match(RHS, m_One())) { 2549 switch (Pred) { 2550 case CmpInst::ICMP_EQ: // X == 1 -> X 2551 case CmpInst::ICMP_UGE: // X >=u 1 -> X 2552 case CmpInst::ICMP_SLE: // X <=s -1 -> X 2553 return LHS; 2554 2555 case CmpInst::ICMP_UGT: // X >u 1 -> false 2556 case CmpInst::ICMP_SLT: // X <s -1 -> false 2557 return getFalse(ITy); 2558 2559 case CmpInst::ICMP_ULE: // X <=u 1 -> true 2560 case CmpInst::ICMP_SGE: // X >=s -1 -> true 2561 return getTrue(ITy); 2562 2563 default: break; 2564 } 2565 } 2566 2567 switch (Pred) { 2568 default: 2569 break; 2570 case ICmpInst::ICMP_UGE: 2571 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false)) 2572 return getTrue(ITy); 2573 break; 2574 case ICmpInst::ICMP_SGE: 2575 /// For signed comparison, the values for an i1 are 0 and -1 2576 /// respectively. This maps into a truth table of: 2577 /// LHS | RHS | LHS >=s RHS | LHS implies RHS 2578 /// 0 | 0 | 1 (0 >= 0) | 1 2579 /// 0 | 1 | 1 (0 >= -1) | 1 2580 /// 1 | 0 | 0 (-1 >= 0) | 0 2581 /// 1 | 1 | 1 (-1 >= -1) | 1 2582 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2583 return getTrue(ITy); 2584 break; 2585 case ICmpInst::ICMP_ULE: 2586 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2587 return getTrue(ITy); 2588 break; 2589 } 2590 2591 return nullptr; 2592 } 2593 2594 /// Try hard to fold icmp with zero RHS because this is a common case. 2595 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, 2596 Value *RHS, const SimplifyQuery &Q) { 2597 if (!match(RHS, m_Zero())) 2598 return nullptr; 2599 2600 Type *ITy = GetCompareTy(LHS); // The return type. 
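// Against a zero RHS, the strictly-unsigned predicates fold outright (no
// value is <u 0, and every value is >=u 0); the remaining cases rely on
// non-zero and sign facts about LHS, computed below.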
  switch (Pred) {
  default:
    llvm_unreachable("Unknown ICmp predicate!");
  case ICmpInst::ICMP_ULT:
    return getFalse(ITy);
  case ICmpInst::ICMP_UGE:
    return getTrue(ITy);
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_ULE:
    if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
      return getFalse(ITy);
    break;
  case ICmpInst::ICMP_NE:
  case ICmpInst::ICMP_UGT:
    if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
      return getTrue(ITy);
    break;
  case ICmpInst::ICMP_SLT: {
    KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (LHSKnown.isNegative())
      return getTrue(ITy);
    if (LHSKnown.isNonNegative())
      return getFalse(ITy);
    break;
  }
  case ICmpInst::ICMP_SLE: {
    KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (LHSKnown.isNegative())
      return getTrue(ITy);
    if (LHSKnown.isNonNegative() &&
        isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
      return getFalse(ITy);
    break;
  }
  case ICmpInst::ICMP_SGE: {
    KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (LHSKnown.isNegative())
      return getFalse(ITy);
    if (LHSKnown.isNonNegative())
      return getTrue(ITy);
    break;
  }
  case ICmpInst::ICMP_SGT: {
    KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (LHSKnown.isNegative())
      return getFalse(ITy);
    if (LHSKnown.isNonNegative() &&
        isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
      return getTrue(ITy);
    break;
  }
  }

  return nullptr;
}

static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
                                       Value *RHS, const InstrInfoQuery &IIQ) {
  Type *ITy = GetCompareTy(RHS); // The return type.

  Value *X;
  // Sign-bit checks can be optimized to true/false after unsigned
  // floating-point casts:
  // icmp slt (bitcast (uitofp X)), 0 --> false
  // icmp sgt (bitcast (uitofp X)), -1 --> true
  if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
    if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
      return ConstantInt::getFalse(ITy);
    if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
      return ConstantInt::getTrue(ITy);
  }

  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return nullptr;

  // Rule out tautological comparisons (e.g., ult 0 or uge 0).
  ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
  if (RHS_CR.isEmptySet())
    return ConstantInt::getFalse(ITy);
  if (RHS_CR.isFullSet())
    return ConstantInt::getTrue(ITy);

  ConstantRange LHS_CR = computeConstantRange(LHS, IIQ.UseInstrInfo);
  if (!LHS_CR.isFullSet()) {
    if (RHS_CR.contains(LHS_CR))
      return ConstantInt::getTrue(ITy);
    if (RHS_CR.inverse().contains(LHS_CR))
      return ConstantInt::getFalse(ITy);
  }

  return nullptr;
}

/// TODO: A large part of this logic is duplicated in InstCombine's
/// foldICmpBinOp(). We should be able to share that and avoid the code
/// duplication.
static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  Type *ITy = GetCompareTy(LHS); // The return type.
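  // For example (illustrative): "icmp slt (add nsw i32 %x, 1), %x" reduces
  // via the add handling below to "icmp slt i32 1, 0", which folds to false,
  // because nsw rules out the signed-wrap case.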
2702 2703 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS); 2704 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS); 2705 if (MaxRecurse && (LBO || RBO)) { 2706 // Analyze the case when either LHS or RHS is an add instruction. 2707 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 2708 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null). 2709 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false; 2710 if (LBO && LBO->getOpcode() == Instruction::Add) { 2711 A = LBO->getOperand(0); 2712 B = LBO->getOperand(1); 2713 NoLHSWrapProblem = 2714 ICmpInst::isEquality(Pred) || 2715 (CmpInst::isUnsigned(Pred) && 2716 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) || 2717 (CmpInst::isSigned(Pred) && 2718 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO))); 2719 } 2720 if (RBO && RBO->getOpcode() == Instruction::Add) { 2721 C = RBO->getOperand(0); 2722 D = RBO->getOperand(1); 2723 NoRHSWrapProblem = 2724 ICmpInst::isEquality(Pred) || 2725 (CmpInst::isUnsigned(Pred) && 2726 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) || 2727 (CmpInst::isSigned(Pred) && 2728 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO))); 2729 } 2730 2731 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 2732 if ((A == RHS || B == RHS) && NoLHSWrapProblem) 2733 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A, 2734 Constant::getNullValue(RHS->getType()), Q, 2735 MaxRecurse - 1)) 2736 return V; 2737 2738 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 2739 if ((C == LHS || D == LHS) && NoRHSWrapProblem) 2740 if (Value *V = 2741 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()), 2742 C == LHS ? D : C, Q, MaxRecurse - 1)) 2743 return V; 2744 2745 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow. 2746 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem && 2747 NoRHSWrapProblem) { 2748 // Determine Y and Z in the form icmp (X+Y), (X+Z). 2749 Value *Y, *Z; 2750 if (A == C) { 2751 // C + B == C + D -> B == D 2752 Y = B; 2753 Z = D; 2754 } else if (A == D) { 2755 // D + B == C + D -> B == C 2756 Y = B; 2757 Z = C; 2758 } else if (B == C) { 2759 // A + C == C + D -> A == D 2760 Y = A; 2761 Z = D; 2762 } else { 2763 assert(B == D); 2764 // A + D == C + D -> A == C 2765 Y = A; 2766 Z = C; 2767 } 2768 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1)) 2769 return V; 2770 } 2771 } 2772 2773 { 2774 Value *Y = nullptr; 2775 // icmp pred (or X, Y), X 2776 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) { 2777 if (Pred == ICmpInst::ICMP_ULT) 2778 return getFalse(ITy); 2779 if (Pred == ICmpInst::ICMP_UGE) 2780 return getTrue(ITy); 2781 2782 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) { 2783 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2784 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2785 if (RHSKnown.isNonNegative() && YKnown.isNegative()) 2786 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy); 2787 if (RHSKnown.isNegative() || YKnown.isNonNegative()) 2788 return Pred == ICmpInst::ICMP_SLT ? 
getFalse(ITy) : getTrue(ITy); 2789 } 2790 } 2791 // icmp pred X, (or X, Y) 2792 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) { 2793 if (Pred == ICmpInst::ICMP_ULE) 2794 return getTrue(ITy); 2795 if (Pred == ICmpInst::ICMP_UGT) 2796 return getFalse(ITy); 2797 2798 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) { 2799 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2800 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2801 if (LHSKnown.isNonNegative() && YKnown.isNegative()) 2802 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy); 2803 if (LHSKnown.isNegative() || YKnown.isNonNegative()) 2804 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy); 2805 } 2806 } 2807 } 2808 2809 // icmp pred (and X, Y), X 2810 if (LBO && match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) { 2811 if (Pred == ICmpInst::ICMP_UGT) 2812 return getFalse(ITy); 2813 if (Pred == ICmpInst::ICMP_ULE) 2814 return getTrue(ITy); 2815 } 2816 // icmp pred X, (and X, Y) 2817 if (RBO && match(RBO, m_c_And(m_Value(), m_Specific(LHS)))) { 2818 if (Pred == ICmpInst::ICMP_UGE) 2819 return getTrue(ITy); 2820 if (Pred == ICmpInst::ICMP_ULT) 2821 return getFalse(ITy); 2822 } 2823 2824 // 0 - (zext X) pred C 2825 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) { 2826 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) { 2827 if (RHSC->getValue().isStrictlyPositive()) { 2828 if (Pred == ICmpInst::ICMP_SLT) 2829 return ConstantInt::getTrue(RHSC->getContext()); 2830 if (Pred == ICmpInst::ICMP_SGE) 2831 return ConstantInt::getFalse(RHSC->getContext()); 2832 if (Pred == ICmpInst::ICMP_EQ) 2833 return ConstantInt::getFalse(RHSC->getContext()); 2834 if (Pred == ICmpInst::ICMP_NE) 2835 return ConstantInt::getTrue(RHSC->getContext()); 2836 } 2837 if (RHSC->getValue().isNonNegative()) { 2838 if (Pred == ICmpInst::ICMP_SLE) 2839 return ConstantInt::getTrue(RHSC->getContext()); 2840 if (Pred == ICmpInst::ICMP_SGT) 2841 return ConstantInt::getFalse(RHSC->getContext()); 2842 } 2843 } 2844 } 2845 2846 // icmp pred (urem X, Y), Y 2847 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) { 2848 switch (Pred) { 2849 default: 2850 break; 2851 case ICmpInst::ICMP_SGT: 2852 case ICmpInst::ICMP_SGE: { 2853 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2854 if (!Known.isNonNegative()) 2855 break; 2856 LLVM_FALLTHROUGH; 2857 } 2858 case ICmpInst::ICMP_EQ: 2859 case ICmpInst::ICMP_UGT: 2860 case ICmpInst::ICMP_UGE: 2861 return getFalse(ITy); 2862 case ICmpInst::ICMP_SLT: 2863 case ICmpInst::ICMP_SLE: { 2864 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2865 if (!Known.isNonNegative()) 2866 break; 2867 LLVM_FALLTHROUGH; 2868 } 2869 case ICmpInst::ICMP_NE: 2870 case ICmpInst::ICMP_ULT: 2871 case ICmpInst::ICMP_ULE: 2872 return getTrue(ITy); 2873 } 2874 } 2875 2876 // icmp pred X, (urem Y, X) 2877 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) { 2878 switch (Pred) { 2879 default: 2880 break; 2881 case ICmpInst::ICMP_SGT: 2882 case ICmpInst::ICMP_SGE: { 2883 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2884 if (!Known.isNonNegative()) 2885 break; 2886 LLVM_FALLTHROUGH; 2887 } 2888 case ICmpInst::ICMP_NE: 2889 case ICmpInst::ICMP_UGT: 2890 case ICmpInst::ICMP_UGE: 2891 return getTrue(ITy); 2892 case ICmpInst::ICMP_SLT: 2893 case ICmpInst::ICMP_SLE: { 2894 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2895 if (!Known.isNonNegative()) 
2896 break; 2897 LLVM_FALLTHROUGH; 2898 } 2899 case ICmpInst::ICMP_EQ: 2900 case ICmpInst::ICMP_ULT: 2901 case ICmpInst::ICMP_ULE: 2902 return getFalse(ITy); 2903 } 2904 } 2905 2906 // x >> y <=u x 2907 // x udiv y <=u x. 2908 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) || 2909 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) { 2910 // icmp pred (X op Y), X 2911 if (Pred == ICmpInst::ICMP_UGT) 2912 return getFalse(ITy); 2913 if (Pred == ICmpInst::ICMP_ULE) 2914 return getTrue(ITy); 2915 } 2916 2917 // x >=u x >> y 2918 // x >=u x udiv y. 2919 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) || 2920 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) { 2921 // icmp pred X, (X op Y) 2922 if (Pred == ICmpInst::ICMP_ULT) 2923 return getFalse(ITy); 2924 if (Pred == ICmpInst::ICMP_UGE) 2925 return getTrue(ITy); 2926 } 2927 2928 // handle: 2929 // CI2 << X == CI 2930 // CI2 << X != CI 2931 // 2932 // where CI2 is a power of 2 and CI isn't 2933 if (auto *CI = dyn_cast<ConstantInt>(RHS)) { 2934 const APInt *CI2Val, *CIVal = &CI->getValue(); 2935 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) && 2936 CI2Val->isPowerOf2()) { 2937 if (!CIVal->isPowerOf2()) { 2938 // CI2 << X can equal zero in some circumstances, 2939 // this simplification is unsafe if CI is zero. 2940 // 2941 // We know it is safe if: 2942 // - The shift is nsw, we can't shift out the one bit. 2943 // - The shift is nuw, we can't shift out the one bit. 2944 // - CI2 is one 2945 // - CI isn't zero 2946 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) || 2947 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) || 2948 CI2Val->isOneValue() || !CI->isZero()) { 2949 if (Pred == ICmpInst::ICMP_EQ) 2950 return ConstantInt::getFalse(RHS->getContext()); 2951 if (Pred == ICmpInst::ICMP_NE) 2952 return ConstantInt::getTrue(RHS->getContext()); 2953 } 2954 } 2955 if (CIVal->isSignMask() && CI2Val->isOneValue()) { 2956 if (Pred == ICmpInst::ICMP_UGT) 2957 return ConstantInt::getFalse(RHS->getContext()); 2958 if (Pred == ICmpInst::ICMP_ULE) 2959 return ConstantInt::getTrue(RHS->getContext()); 2960 } 2961 } 2962 } 2963 2964 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() && 2965 LBO->getOperand(1) == RBO->getOperand(1)) { 2966 switch (LBO->getOpcode()) { 2967 default: 2968 break; 2969 case Instruction::UDiv: 2970 case Instruction::LShr: 2971 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) || 2972 !Q.IIQ.isExact(RBO)) 2973 break; 2974 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2975 RBO->getOperand(0), Q, MaxRecurse - 1)) 2976 return V; 2977 break; 2978 case Instruction::SDiv: 2979 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) || 2980 !Q.IIQ.isExact(RBO)) 2981 break; 2982 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2983 RBO->getOperand(0), Q, MaxRecurse - 1)) 2984 return V; 2985 break; 2986 case Instruction::AShr: 2987 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO)) 2988 break; 2989 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2990 RBO->getOperand(0), Q, MaxRecurse - 1)) 2991 return V; 2992 break; 2993 case Instruction::Shl: { 2994 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO); 2995 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO); 2996 if (!NUW && !NSW) 2997 break; 2998 if (!NSW && ICmpInst::isSigned(Pred)) 2999 break; 3000 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 3001 RBO->getOperand(0), Q, MaxRecurse - 1)) 3002 return V; 3003 break; 3004 } 3005 } 3006 } 
3007 return nullptr; 3008 } 3009 3010 /// Simplify integer comparisons where at least one operand of the compare 3011 /// matches an integer min/max idiom. 3012 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS, 3013 Value *RHS, const SimplifyQuery &Q, 3014 unsigned MaxRecurse) { 3015 Type *ITy = GetCompareTy(LHS); // The return type. 3016 Value *A, *B; 3017 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE; 3018 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B". 3019 3020 // Signed variants on "max(a,b)>=a -> true". 3021 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 3022 if (A != RHS) 3023 std::swap(A, B); // smax(A, B) pred A. 3024 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 3025 // We analyze this as smax(A, B) pred A. 3026 P = Pred; 3027 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) && 3028 (A == LHS || B == LHS)) { 3029 if (A != LHS) 3030 std::swap(A, B); // A pred smax(A, B). 3031 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 3032 // We analyze this as smax(A, B) swapped-pred A. 3033 P = CmpInst::getSwappedPredicate(Pred); 3034 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 3035 (A == RHS || B == RHS)) { 3036 if (A != RHS) 3037 std::swap(A, B); // smin(A, B) pred A. 3038 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 3039 // We analyze this as smax(-A, -B) swapped-pred -A. 3040 // Note that we do not need to actually form -A or -B thanks to EqP. 3041 P = CmpInst::getSwappedPredicate(Pred); 3042 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) && 3043 (A == LHS || B == LHS)) { 3044 if (A != LHS) 3045 std::swap(A, B); // A pred smin(A, B). 3046 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 3047 // We analyze this as smax(-A, -B) pred -A. 3048 // Note that we do not need to actually form -A or -B thanks to EqP. 3049 P = Pred; 3050 } 3051 if (P != CmpInst::BAD_ICMP_PREDICATE) { 3052 // Cases correspond to "max(A, B) p A". 3053 switch (P) { 3054 default: 3055 break; 3056 case CmpInst::ICMP_EQ: 3057 case CmpInst::ICMP_SLE: 3058 // Equivalent to "A EqP B". This may be the same as the condition tested 3059 // in the max/min; if so, we can just return that. 3060 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 3061 return V; 3062 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 3063 return V; 3064 // Otherwise, see if "A EqP B" simplifies. 3065 if (MaxRecurse) 3066 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 3067 return V; 3068 break; 3069 case CmpInst::ICMP_NE: 3070 case CmpInst::ICMP_SGT: { 3071 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 3072 // Equivalent to "A InvEqP B". This may be the same as the condition 3073 // tested in the max/min; if so, we can just return that. 3074 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 3075 return V; 3076 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 3077 return V; 3078 // Otherwise, see if "A InvEqP B" simplifies. 3079 if (MaxRecurse) 3080 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 3081 return V; 3082 break; 3083 } 3084 case CmpInst::ICMP_SGE: 3085 // Always true. 3086 return getTrue(ITy); 3087 case CmpInst::ICMP_SLT: 3088 // Always false. 3089 return getFalse(ITy); 3090 } 3091 } 3092 3093 // Unsigned variants on "max(a,b)>=a -> true". 
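  // For example (illustrative, with umax standing for the matched max idiom):
  //   umax(%a, %b) uge %a  --> true
  //   umax(%a, %b) ult %a  --> false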
3094 P = CmpInst::BAD_ICMP_PREDICATE; 3095 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 3096 if (A != RHS) 3097 std::swap(A, B); // umax(A, B) pred A. 3098 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 3099 // We analyze this as umax(A, B) pred A. 3100 P = Pred; 3101 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) && 3102 (A == LHS || B == LHS)) { 3103 if (A != LHS) 3104 std::swap(A, B); // A pred umax(A, B). 3105 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 3106 // We analyze this as umax(A, B) swapped-pred A. 3107 P = CmpInst::getSwappedPredicate(Pred); 3108 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 3109 (A == RHS || B == RHS)) { 3110 if (A != RHS) 3111 std::swap(A, B); // umin(A, B) pred A. 3112 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 3113 // We analyze this as umax(-A, -B) swapped-pred -A. 3114 // Note that we do not need to actually form -A or -B thanks to EqP. 3115 P = CmpInst::getSwappedPredicate(Pred); 3116 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) && 3117 (A == LHS || B == LHS)) { 3118 if (A != LHS) 3119 std::swap(A, B); // A pred umin(A, B). 3120 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 3121 // We analyze this as umax(-A, -B) pred -A. 3122 // Note that we do not need to actually form -A or -B thanks to EqP. 3123 P = Pred; 3124 } 3125 if (P != CmpInst::BAD_ICMP_PREDICATE) { 3126 // Cases correspond to "max(A, B) p A". 3127 switch (P) { 3128 default: 3129 break; 3130 case CmpInst::ICMP_EQ: 3131 case CmpInst::ICMP_ULE: 3132 // Equivalent to "A EqP B". This may be the same as the condition tested 3133 // in the max/min; if so, we can just return that. 3134 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 3135 return V; 3136 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 3137 return V; 3138 // Otherwise, see if "A EqP B" simplifies. 3139 if (MaxRecurse) 3140 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 3141 return V; 3142 break; 3143 case CmpInst::ICMP_NE: 3144 case CmpInst::ICMP_UGT: { 3145 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 3146 // Equivalent to "A InvEqP B". This may be the same as the condition 3147 // tested in the max/min; if so, we can just return that. 3148 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 3149 return V; 3150 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 3151 return V; 3152 // Otherwise, see if "A InvEqP B" simplifies. 3153 if (MaxRecurse) 3154 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 3155 return V; 3156 break; 3157 } 3158 case CmpInst::ICMP_UGE: 3159 // Always true. 3160 return getTrue(ITy); 3161 case CmpInst::ICMP_ULT: 3162 // Always false. 3163 return getFalse(ITy); 3164 } 3165 } 3166 3167 // Variants on "max(x,y) >= min(x,z)". 3168 Value *C, *D; 3169 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && 3170 match(RHS, m_SMin(m_Value(C), m_Value(D))) && 3171 (A == C || A == D || B == C || B == D)) { 3172 // max(x, ?) pred min(x, ?). 3173 if (Pred == CmpInst::ICMP_SGE) 3174 // Always true. 3175 return getTrue(ITy); 3176 if (Pred == CmpInst::ICMP_SLT) 3177 // Always false. 3178 return getFalse(ITy); 3179 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 3180 match(RHS, m_SMax(m_Value(C), m_Value(D))) && 3181 (A == C || A == D || B == C || B == D)) { 3182 // min(x, ?) pred max(x, ?). 3183 if (Pred == CmpInst::ICMP_SLE) 3184 // Always true. 
3185 return getTrue(ITy); 3186 if (Pred == CmpInst::ICMP_SGT) 3187 // Always false. 3188 return getFalse(ITy); 3189 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && 3190 match(RHS, m_UMin(m_Value(C), m_Value(D))) && 3191 (A == C || A == D || B == C || B == D)) { 3192 // max(x, ?) pred min(x, ?). 3193 if (Pred == CmpInst::ICMP_UGE) 3194 // Always true. 3195 return getTrue(ITy); 3196 if (Pred == CmpInst::ICMP_ULT) 3197 // Always false. 3198 return getFalse(ITy); 3199 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 3200 match(RHS, m_UMax(m_Value(C), m_Value(D))) && 3201 (A == C || A == D || B == C || B == D)) { 3202 // min(x, ?) pred max(x, ?). 3203 if (Pred == CmpInst::ICMP_ULE) 3204 // Always true. 3205 return getTrue(ITy); 3206 if (Pred == CmpInst::ICMP_UGT) 3207 // Always false. 3208 return getFalse(ITy); 3209 } 3210 3211 return nullptr; 3212 } 3213 3214 /// Given operands for an ICmpInst, see if we can fold the result. 3215 /// If not, this returns null. 3216 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3217 const SimplifyQuery &Q, unsigned MaxRecurse) { 3218 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3219 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!"); 3220 3221 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3222 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3223 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3224 3225 // If we have a constant, make sure it is on the RHS. 3226 std::swap(LHS, RHS); 3227 Pred = CmpInst::getSwappedPredicate(Pred); 3228 } 3229 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X"); 3230 3231 Type *ITy = GetCompareTy(LHS); // The return type. 3232 3233 // For EQ and NE, we can always pick a value for the undef to make the 3234 // predicate pass or fail, so we can return undef. 3235 // Matches behavior in llvm::ConstantFoldCompareInstruction. 3236 if (isa<UndefValue>(RHS) && ICmpInst::isEquality(Pred)) 3237 return UndefValue::get(ITy); 3238 3239 // icmp X, X -> true/false 3240 // icmp X, undef -> true/false because undef could be X. 3241 if (LHS == RHS || isa<UndefValue>(RHS)) 3242 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred)); 3243 3244 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q)) 3245 return V; 3246 3247 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q)) 3248 return V; 3249 3250 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ)) 3251 return V; 3252 3253 // If both operands have range metadata, use the metadata 3254 // to simplify the comparison. 
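  // For example (a sketch; the !range payloads here are hypothetical):
  //   %a = load i8, i8* %p, !range !0   ; !0 = !{i8 0, i8 10},  %a in [0, 10)
  //   %b = load i8, i8* %q, !range !1   ; !1 = !{i8 20, i8 30}, %b in [20, 30)
  //   %c = icmp ult i8 %a, %b
  // folds to true: every value in [0, 10) is unsigned-less-than every value
  // in [20, 30).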
3255 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) { 3256 auto RHS_Instr = cast<Instruction>(RHS); 3257 auto LHS_Instr = cast<Instruction>(LHS); 3258 3259 if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) && 3260 Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) { 3261 auto RHS_CR = getConstantRangeFromMetadata( 3262 *RHS_Instr->getMetadata(LLVMContext::MD_range)); 3263 auto LHS_CR = getConstantRangeFromMetadata( 3264 *LHS_Instr->getMetadata(LLVMContext::MD_range)); 3265 3266 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR); 3267 if (Satisfied_CR.contains(LHS_CR)) 3268 return ConstantInt::getTrue(RHS->getContext()); 3269 3270 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion( 3271 CmpInst::getInversePredicate(Pred), RHS_CR); 3272 if (InversedSatisfied_CR.contains(LHS_CR)) 3273 return ConstantInt::getFalse(RHS->getContext()); 3274 } 3275 } 3276 3277 // Compare of cast, for example (zext X) != 0 -> X != 0 3278 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) { 3279 Instruction *LI = cast<CastInst>(LHS); 3280 Value *SrcOp = LI->getOperand(0); 3281 Type *SrcTy = SrcOp->getType(); 3282 Type *DstTy = LI->getType(); 3283 3284 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input 3285 // if the integer type is the same size as the pointer type. 3286 if (MaxRecurse && isa<PtrToIntInst>(LI) && 3287 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) { 3288 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 3289 // Transfer the cast to the constant. 3290 if (Value *V = SimplifyICmpInst(Pred, SrcOp, 3291 ConstantExpr::getIntToPtr(RHSC, SrcTy), 3292 Q, MaxRecurse-1)) 3293 return V; 3294 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) { 3295 if (RI->getOperand(0)->getType() == SrcTy) 3296 // Compare without the cast. 3297 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0), 3298 Q, MaxRecurse-1)) 3299 return V; 3300 } 3301 } 3302 3303 if (isa<ZExtInst>(LHS)) { 3304 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the 3305 // same type. 3306 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) { 3307 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType()) 3308 // Compare X and Y. Note that signed predicates become unsigned. 3309 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 3310 SrcOp, RI->getOperand(0), Q, 3311 MaxRecurse-1)) 3312 return V; 3313 } 3314 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended 3315 // too. If not, then try to deduce the result of the comparison. 3316 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 3317 // Compute the constant that would happen if we truncated to SrcTy then 3318 // reextended to DstTy. 3319 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy); 3320 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy); 3321 3322 // If the re-extended constant didn't change then this is effectively 3323 // also a case of comparing two zero-extended values. 3324 if (RExt == CI && MaxRecurse) 3325 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 3326 SrcOp, Trunc, Q, MaxRecurse-1)) 3327 return V; 3328 3329 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit 3330 // there. Use this to work out the result of the comparison. 3331 if (RExt != CI) { 3332 switch (Pred) { 3333 default: llvm_unreachable("Unknown ICmp predicate!"); 3334 // LHS <u RHS. 
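          // For example (hypothetical values): in
          //   %z = zext i8 %x to i32
          //   %c = icmp eq i32 %z, 500
          // the i8 round-trip changes 500 (trunc/zext gives 244), so %z,
          // which is at most 255, can never equal 500 and %c folds to false.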
          case ICmpInst::ICMP_EQ:
          case ICmpInst::ICMP_UGT:
          case ICmpInst::ICMP_UGE:
            return ConstantInt::getFalse(CI->getContext());

          case ICmpInst::ICMP_NE:
          case ICmpInst::ICMP_ULT:
          case ICmpInst::ICMP_ULE:
            return ConstantInt::getTrue(CI->getContext());

          // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
          // is non-negative then LHS <s RHS.
          case ICmpInst::ICMP_SGT:
          case ICmpInst::ICMP_SGE:
            return CI->getValue().isNegative() ?
               ConstantInt::getTrue(CI->getContext()) :
               ConstantInt::getFalse(CI->getContext());

          case ICmpInst::ICMP_SLT:
          case ICmpInst::ICMP_SLE:
            return CI->getValue().isNegative() ?
               ConstantInt::getFalse(CI->getContext()) :
               ConstantInt::getTrue(CI->getContext());
          }
        }
      }
    }

    if (isa<SExtInst>(LHS)) {
      // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
      // same type.
      if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
        if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
          // Compare X and Y. Note that the predicate does not change.
          if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
                                          Q, MaxRecurse-1))
            return V;
      }
      // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
      // too. If not, then try to deduce the result of the comparison.
      else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
        // Compute the constant that would happen if we truncated to SrcTy then
        // reextended to DstTy.
        Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
        Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);

        // If the re-extended constant didn't change then this is effectively
        // also a case of comparing two sign-extended values.
        if (RExt == CI && MaxRecurse)
          if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
            return V;

        // Otherwise the upper bits of LHS are all equal, while RHS has varying
        // bits there. Use this to work out the result of the comparison.
        if (RExt != CI) {
          switch (Pred) {
          default: llvm_unreachable("Unknown ICmp predicate!");
          case ICmpInst::ICMP_EQ:
            return ConstantInt::getFalse(CI->getContext());
          case ICmpInst::ICMP_NE:
            return ConstantInt::getTrue(CI->getContext());

          // If RHS is non-negative then LHS <s RHS. If RHS is negative then
          // LHS >s RHS.
          case ICmpInst::ICMP_SGT:
          case ICmpInst::ICMP_SGE:
            return CI->getValue().isNegative() ?
               ConstantInt::getTrue(CI->getContext()) :
               ConstantInt::getFalse(CI->getContext());
          case ICmpInst::ICMP_SLT:
          case ICmpInst::ICMP_SLE:
            return CI->getValue().isNegative() ?
               ConstantInt::getFalse(CI->getContext()) :
               ConstantInt::getTrue(CI->getContext());

          // If LHS is non-negative then LHS <u RHS. If LHS is negative then
          // LHS >u RHS.
          case ICmpInst::ICMP_UGT:
          case ICmpInst::ICMP_UGE:
            // Comparison is true iff the LHS <s 0.
            if (MaxRecurse)
              if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
                                              Constant::getNullValue(SrcTy),
                                              Q, MaxRecurse-1))
                return V;
            break;
          case ICmpInst::ICMP_ULT:
          case ICmpInst::ICMP_ULE:
            // Comparison is true iff the LHS >=s 0.
            if (MaxRecurse)
              if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
                                              Constant::getNullValue(SrcTy),
                                              Q, MaxRecurse-1))
                return V;
            break;
          }
        }
      }
    }
  }

  // icmp eq|ne X, Y -> false|true if X != Y
  if (ICmpInst::isEquality(Pred) &&
      isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
    return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
  }

  if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
    return V;

  if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
    return V;

  // Simplify comparisons of related pointers using a powerful, recursive
  // GEP-walk when we have target data available.
  if (LHS->getType()->isPointerTy())
    if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
                                     Q.IIQ, LHS, RHS))
      return C;
  if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
    if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
      if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
              Q.DL.getTypeSizeInBits(CLHS->getType()) &&
          Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
              Q.DL.getTypeSizeInBits(CRHS->getType()))
        if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
                                         Q.IIQ, CLHS->getPointerOperand(),
                                         CRHS->getPointerOperand()))
          return C;

  if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
    if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
      if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
          GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
          (ICmpInst::isEquality(Pred) ||
           (GLHS->isInBounds() && GRHS->isInBounds() &&
            Pred == ICmpInst::getSignedPredicate(Pred)))) {
        // The bases are equal and the indices are constant. Build a constant
        // expression GEP with the same indices and a null base pointer to see
        // what constant folding can make out of it.
        Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
        SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
        Constant *NewLHS = ConstantExpr::getGetElementPtr(
            GLHS->getSourceElementType(), Null, IndicesLHS);

        SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
        Constant *NewRHS = ConstantExpr::getGetElementPtr(
            GLHS->getSourceElementType(), Null, IndicesRHS);
        Constant *NewICmp = ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
        return ConstantFoldConstant(NewICmp, Q.DL);
      }
    }
  }

  // If the comparison is with the result of a select instruction, check whether
  // comparing with either branch of the select always yields the same value.
  if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
    if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
      return V;

  // If the comparison is with the result of a phi instruction, check whether
  // doing the compare with each incoming phi value yields a common result.
  if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
    if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                              const SimplifyQuery &Q) {
  return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
}

/// Given operands for an FCmpInst, see if we can fold the result.
3510 /// If not, this returns null. 3511 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3512 FastMathFlags FMF, const SimplifyQuery &Q, 3513 unsigned MaxRecurse) { 3514 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3515 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!"); 3516 3517 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3518 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3519 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3520 3521 // If we have a constant, make sure it is on the RHS. 3522 std::swap(LHS, RHS); 3523 Pred = CmpInst::getSwappedPredicate(Pred); 3524 } 3525 3526 // Fold trivial predicates. 3527 Type *RetTy = GetCompareTy(LHS); 3528 if (Pred == FCmpInst::FCMP_FALSE) 3529 return getFalse(RetTy); 3530 if (Pred == FCmpInst::FCMP_TRUE) 3531 return getTrue(RetTy); 3532 3533 // Fold (un)ordered comparison if we can determine there are no NaNs. 3534 if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD) 3535 if (FMF.noNaNs() || 3536 (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI))) 3537 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD); 3538 3539 // NaN is unordered; NaN is not ordered. 3540 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) && 3541 "Comparison must be either ordered or unordered"); 3542 if (match(RHS, m_NaN())) 3543 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred)); 3544 3545 // fcmp pred x, undef and fcmp pred undef, x 3546 // fold to true if unordered, false if ordered 3547 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) { 3548 // Choosing NaN for the undef will always make unordered comparison succeed 3549 // and ordered comparison fail. 3550 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred)); 3551 } 3552 3553 // fcmp x,x -> true/false. Not all compares are foldable. 3554 if (LHS == RHS) { 3555 if (CmpInst::isTrueWhenEqual(Pred)) 3556 return getTrue(RetTy); 3557 if (CmpInst::isFalseWhenEqual(Pred)) 3558 return getFalse(RetTy); 3559 } 3560 3561 // Handle fcmp with constant RHS. 3562 // TODO: Use match with a specific FP value, so these work with vectors with 3563 // undef lanes. 3564 const APFloat *C; 3565 if (match(RHS, m_APFloat(C))) { 3566 // Check whether the constant is an infinity. 3567 if (C->isInfinity()) { 3568 if (C->isNegative()) { 3569 switch (Pred) { 3570 case FCmpInst::FCMP_OLT: 3571 // No value is ordered and less than negative infinity. 3572 return getFalse(RetTy); 3573 case FCmpInst::FCMP_UGE: 3574 // All values are unordered with or at least negative infinity. 3575 return getTrue(RetTy); 3576 default: 3577 break; 3578 } 3579 } else { 3580 switch (Pred) { 3581 case FCmpInst::FCMP_OGT: 3582 // No value is ordered and greater than infinity. 3583 return getFalse(RetTy); 3584 case FCmpInst::FCMP_ULE: 3585 // All values are unordered with and at most infinity. 3586 return getTrue(RetTy); 3587 default: 3588 break; 3589 } 3590 } 3591 } 3592 if (C->isNegative() && !C->isNegZero()) { 3593 assert(!C->isNaN() && "Unexpected NaN constant!"); 3594 // TODO: We can catch more cases by using a range check rather than 3595 // relying on CannotBeOrderedLessThanZero. 
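      // For example, assuming %x is known non-negative (say, the result of a
      // call to @llvm.fabs.f32):
      //   fcmp ugt float %x, -1.0  --> true
      //   fcmp olt float %x, -1.0  --> false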
3596 switch (Pred) { 3597 case FCmpInst::FCMP_UGE: 3598 case FCmpInst::FCMP_UGT: 3599 case FCmpInst::FCMP_UNE: 3600 // (X >= 0) implies (X > C) when (C < 0) 3601 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3602 return getTrue(RetTy); 3603 break; 3604 case FCmpInst::FCMP_OEQ: 3605 case FCmpInst::FCMP_OLE: 3606 case FCmpInst::FCMP_OLT: 3607 // (X >= 0) implies !(X < C) when (C < 0) 3608 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3609 return getFalse(RetTy); 3610 break; 3611 default: 3612 break; 3613 } 3614 } 3615 3616 // Check comparison of [minnum/maxnum with constant] with other constant. 3617 const APFloat *C2; 3618 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) && 3619 *C2 < *C) || 3620 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) && 3621 *C2 > *C)) { 3622 bool IsMaxNum = 3623 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum; 3624 // The ordered relationship and minnum/maxnum guarantee that we do not 3625 // have NaN constants, so ordered/unordered preds are handled the same. 3626 switch (Pred) { 3627 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_UEQ: 3628 // minnum(X, LesserC) == C --> false 3629 // maxnum(X, GreaterC) == C --> false 3630 return getFalse(RetTy); 3631 case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_UNE: 3632 // minnum(X, LesserC) != C --> true 3633 // maxnum(X, GreaterC) != C --> true 3634 return getTrue(RetTy); 3635 case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_UGE: 3636 case FCmpInst::FCMP_OGT: case FCmpInst::FCMP_UGT: 3637 // minnum(X, LesserC) >= C --> false 3638 // minnum(X, LesserC) > C --> false 3639 // maxnum(X, GreaterC) >= C --> true 3640 // maxnum(X, GreaterC) > C --> true 3641 return ConstantInt::get(RetTy, IsMaxNum); 3642 case FCmpInst::FCMP_OLE: case FCmpInst::FCMP_ULE: 3643 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_ULT: 3644 // minnum(X, LesserC) <= C --> true 3645 // minnum(X, LesserC) < C --> true 3646 // maxnum(X, GreaterC) <= C --> false 3647 // maxnum(X, GreaterC) < C --> false 3648 return ConstantInt::get(RetTy, !IsMaxNum); 3649 default: 3650 // TRUE/FALSE/ORD/UNO should be handled before this. 3651 llvm_unreachable("Unexpected fcmp predicate"); 3652 } 3653 } 3654 } 3655 3656 if (match(RHS, m_AnyZeroFP())) { 3657 switch (Pred) { 3658 case FCmpInst::FCMP_OGE: 3659 case FCmpInst::FCMP_ULT: 3660 // Positive or zero X >= 0.0 --> true 3661 // Positive or zero X < 0.0 --> false 3662 if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) && 3663 CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3664 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy); 3665 break; 3666 case FCmpInst::FCMP_UGE: 3667 case FCmpInst::FCMP_OLT: 3668 // Positive or zero or nan X >= 0.0 --> true 3669 // Positive or zero or nan X < 0.0 --> false 3670 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3671 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy); 3672 break; 3673 default: 3674 break; 3675 } 3676 } 3677 3678 // If the comparison is with the result of a select instruction, check whether 3679 // comparing with either branch of the select always yields the same value. 3680 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3681 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3682 return V; 3683 3684 // If the comparison is with the result of a phi instruction, check whether 3685 // doing the compare with each incoming phi value yields a common result. 
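  // For example (illustrative):
  //   %p = phi float [ 1.0, %bb0 ], [ 2.0, %bb1 ]
  //   %c = fcmp ogt float %p, 0.0
  // folds to true because the compare is true for both incoming values.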
  if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
    if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                              FastMathFlags FMF, const SimplifyQuery &Q) {
  return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
}

/// See if V simplifies when its operand Op is replaced with RepOp.
static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // Trivial replacement.
  if (V == Op)
    return RepOp;

  // We cannot replace a constant, and shouldn't even try.
  if (isa<Constant>(Op))
    return nullptr;

  auto *I = dyn_cast<Instruction>(V);
  if (!I)
    return nullptr;

  // If this is a binary operator, try to simplify it with the replaced op.
  if (auto *B = dyn_cast<BinaryOperator>(I)) {
    // Consider:
    //   %cmp = icmp eq i32 %x, 2147483647
    //   %add = add nsw i32 %x, 1
    //   %sel = select i1 %cmp, i32 -2147483648, i32 %add
    //
    // We can't replace %sel with %add unless we strip away the flags.
    // TODO: This is an unusual limitation because better analysis results in
    // worse simplification. InstCombine can do this fold more generally
    // by dropping the flags. Remove this fold to save compile-time?
    if (isa<OverflowingBinaryOperator>(B))
      if (Q.IIQ.hasNoSignedWrap(B) || Q.IIQ.hasNoUnsignedWrap(B))
        return nullptr;
    if (isa<PossiblyExactOperator>(B) && Q.IIQ.isExact(B))
      return nullptr;

    if (MaxRecurse) {
      if (B->getOperand(0) == Op)
        return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q,
                             MaxRecurse - 1);
      if (B->getOperand(1) == Op)
        return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q,
                             MaxRecurse - 1);
    }
  }

  // Same for CmpInsts.
  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    if (MaxRecurse) {
      if (C->getOperand(0) == Op)
        return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q,
                               MaxRecurse - 1);
      if (C->getOperand(1) == Op)
        return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q,
                               MaxRecurse - 1);
    }
  }

  // Same for GEPs.
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    if (MaxRecurse) {
      SmallVector<Value *, 8> NewOps(GEP->getNumOperands());
      transform(GEP->operands(), NewOps.begin(),
                [&](Value *V) { return V == Op ? RepOp : V; });
      return SimplifyGEPInst(GEP->getSourceElementType(), NewOps, Q,
                             MaxRecurse - 1);
    }
  }

  // TODO: We could hand off more cases to instsimplify here.

  // If all operands are constant after substituting Op for RepOp then we can
  // constant fold the instruction.
  if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) {
    // Build a list of all constant operands.
    SmallVector<Constant *, 8> ConstOps;
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      if (I->getOperand(i) == Op)
        ConstOps.push_back(CRepOp);
      else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i)))
        ConstOps.push_back(COp);
      else
        break;
    }

    // All operands were constants, fold it.
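    // For example, substituting the hypothetical replacement %x -> 7 into
    //   %a = mul i32 %x, 3
    // makes both operands constant, and the multiply folds to 21 below.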
3781 if (ConstOps.size() == I->getNumOperands()) { 3782 if (CmpInst *C = dyn_cast<CmpInst>(I)) 3783 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0], 3784 ConstOps[1], Q.DL, Q.TLI); 3785 3786 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 3787 if (!LI->isVolatile()) 3788 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL); 3789 3790 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI); 3791 } 3792 } 3793 3794 return nullptr; 3795 } 3796 3797 /// Try to simplify a select instruction when its condition operand is an 3798 /// integer comparison where one operand of the compare is a constant. 3799 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, 3800 const APInt *Y, bool TrueWhenUnset) { 3801 const APInt *C; 3802 3803 // (X & Y) == 0 ? X & ~Y : X --> X 3804 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y 3805 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) && 3806 *Y == ~*C) 3807 return TrueWhenUnset ? FalseVal : TrueVal; 3808 3809 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y 3810 // (X & Y) != 0 ? X : X & ~Y --> X 3811 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) && 3812 *Y == ~*C) 3813 return TrueWhenUnset ? FalseVal : TrueVal; 3814 3815 if (Y->isPowerOf2()) { 3816 // (X & Y) == 0 ? X | Y : X --> X | Y 3817 // (X & Y) != 0 ? X | Y : X --> X 3818 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) && 3819 *Y == *C) 3820 return TrueWhenUnset ? TrueVal : FalseVal; 3821 3822 // (X & Y) == 0 ? X : X | Y --> X 3823 // (X & Y) != 0 ? X : X | Y --> X | Y 3824 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) && 3825 *Y == *C) 3826 return TrueWhenUnset ? TrueVal : FalseVal; 3827 } 3828 3829 return nullptr; 3830 } 3831 3832 /// An alternative way to test if a bit is set or not uses sgt/slt instead of 3833 /// eq/ne. 3834 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, 3835 ICmpInst::Predicate Pred, 3836 Value *TrueVal, Value *FalseVal) { 3837 Value *X; 3838 APInt Mask; 3839 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask)) 3840 return nullptr; 3841 3842 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask, 3843 Pred == ICmpInst::ICMP_EQ); 3844 } 3845 3846 /// Try to simplify a select instruction when its condition operand is an 3847 /// integer comparison. 3848 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, 3849 Value *FalseVal, const SimplifyQuery &Q, 3850 unsigned MaxRecurse) { 3851 ICmpInst::Predicate Pred; 3852 Value *CmpLHS, *CmpRHS; 3853 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)))) 3854 return nullptr; 3855 3856 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) { 3857 Value *X; 3858 const APInt *Y; 3859 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y)))) 3860 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y, 3861 Pred == ICmpInst::ICMP_EQ)) 3862 return V; 3863 3864 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate. 3865 Value *ShAmt; 3866 auto isFsh = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), 3867 m_Value(ShAmt)), 3868 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), 3869 m_Value(ShAmt))); 3870 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X 3871 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X 3872 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt && 3873 Pred == ICmpInst::ICMP_EQ) 3874 return X; 3875 // (ShAmt != 0) ? X : fshl(X, *, ShAmt) --> X 3876 // (ShAmt != 0) ? 
X : fshr(*, X, ShAmt) --> X 3877 if (match(FalseVal, isFsh) && TrueVal == X && CmpLHS == ShAmt && 3878 Pred == ICmpInst::ICMP_NE) 3879 return X; 3880 3881 // Test for a zero-shift-guard-op around rotates. These are used to 3882 // avoid UB from oversized shifts in raw IR rotate patterns, but the 3883 // intrinsics do not have that problem. 3884 // We do not allow this transform for the general funnel shift case because 3885 // that would not preserve the poison safety of the original code. 3886 auto isRotate = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), 3887 m_Deferred(X), 3888 m_Value(ShAmt)), 3889 m_Intrinsic<Intrinsic::fshr>(m_Value(X), 3890 m_Deferred(X), 3891 m_Value(ShAmt))); 3892 // (ShAmt != 0) ? fshl(X, X, ShAmt) : X --> fshl(X, X, ShAmt) 3893 // (ShAmt != 0) ? fshr(X, X, ShAmt) : X --> fshr(X, X, ShAmt) 3894 if (match(TrueVal, isRotate) && FalseVal == X && CmpLHS == ShAmt && 3895 Pred == ICmpInst::ICMP_NE) 3896 return TrueVal; 3897 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt) 3898 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt) 3899 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt && 3900 Pred == ICmpInst::ICMP_EQ) 3901 return FalseVal; 3902 } 3903 3904 // Check for other compares that behave like bit test. 3905 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, 3906 TrueVal, FalseVal)) 3907 return V; 3908 3909 // If we have an equality comparison, then we know the value in one of the 3910 // arms of the select. See if substituting this value into the arm and 3911 // simplifying the result yields the same value as the other arm. 3912 if (Pred == ICmpInst::ICMP_EQ) { 3913 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3914 TrueVal || 3915 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3916 TrueVal) 3917 return FalseVal; 3918 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3919 FalseVal || 3920 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3921 FalseVal) 3922 return FalseVal; 3923 } else if (Pred == ICmpInst::ICMP_NE) { 3924 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3925 FalseVal || 3926 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3927 FalseVal) 3928 return TrueVal; 3929 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3930 TrueVal || 3931 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3932 TrueVal) 3933 return TrueVal; 3934 } 3935 3936 return nullptr; 3937 } 3938 3939 /// Try to simplify a select instruction when its condition operand is a 3940 /// floating-point comparison. 3941 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, 3942 const SimplifyQuery &Q) { 3943 FCmpInst::Predicate Pred; 3944 if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) && 3945 !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T)))) 3946 return nullptr; 3947 3948 // This transform is safe if we do not have (do not care about) -0.0 or if 3949 // at least one operand is known to not be -0.0. Otherwise, the select can 3950 // change the sign of a zero operand. 3951 bool HasNoSignedZeros = Q.CxtI && isa<FPMathOperator>(Q.CxtI) && 3952 Q.CxtI->hasNoSignedZeros(); 3953 const APFloat *C; 3954 if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) || 3955 (match(F, m_APFloat(C)) && C->isNonZero())) { 3956 // (T == F) ? T : F --> F 3957 // (F == T) ? 
T : F --> F
    if (Pred == FCmpInst::FCMP_OEQ)
      return F;

    // (T != F) ? T : F --> T
    // (F != T) ? T : F --> T
    if (Pred == FCmpInst::FCMP_UNE)
      return T;
  }

  return nullptr;
}

/// Given operands for a SelectInst, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (auto *CondC = dyn_cast<Constant>(Cond)) {
    if (auto *TrueC = dyn_cast<Constant>(TrueVal))
      if (auto *FalseC = dyn_cast<Constant>(FalseVal))
        return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);

    // select undef, X, Y -> X or Y
    if (isa<UndefValue>(CondC))
      return isa<Constant>(FalseVal) ? FalseVal : TrueVal;

    // TODO: Vector constants with undef elements don't simplify.

    // select true, X, Y -> X
    if (CondC->isAllOnesValue())
      return TrueVal;
    // select false, X, Y -> Y
    if (CondC->isNullValue())
      return FalseVal;
  }

  // select i1 Cond, i1 true, i1 false --> i1 Cond
  assert(Cond->getType()->isIntOrIntVectorTy(1) &&
         "Select must have bool or bool vector condition");
  assert(TrueVal->getType() == FalseVal->getType() &&
         "Select must have same types for true/false ops");
  if (Cond->getType() == TrueVal->getType() &&
      match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
    return Cond;

  // select ?, X, X -> X
  if (TrueVal == FalseVal)
    return TrueVal;

  if (isa<UndefValue>(TrueVal))   // select ?, undef, X -> X
    return FalseVal;
  if (isa<UndefValue>(FalseVal))  // select ?, X, undef -> X
    return TrueVal;

  // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
  Constant *TrueC, *FalseC;
  if (TrueVal->getType()->isVectorTy() && match(TrueVal, m_Constant(TrueC)) &&
      match(FalseVal, m_Constant(FalseC))) {
    unsigned NumElts = TrueC->getType()->getVectorNumElements();
    SmallVector<Constant *, 16> NewC;
    for (unsigned i = 0; i != NumElts; ++i) {
      // Bail out on incomplete vector constants.
      Constant *TEltC = TrueC->getAggregateElement(i);
      Constant *FEltC = FalseC->getAggregateElement(i);
      if (!TEltC || !FEltC)
        break;

      // If the elements match (undef or not), that value is the result. If only
      // one element is undef, choose the defined element as the safe result.
      if (TEltC == FEltC)
        NewC.push_back(TEltC);
      else if (isa<UndefValue>(TEltC))
        NewC.push_back(FEltC);
      else if (isa<UndefValue>(FEltC))
        NewC.push_back(TEltC);
      else
        break;
    }
    if (NewC.size() == NumElts)
      return ConstantVector::get(NewC);
  }

  if (Value *V =
          simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
    return V;

  if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
    return V;

  if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
    return V;

  Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
  if (Imp)
    return *Imp ? TrueVal : FalseVal;

  return nullptr;
}

Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
                                const SimplifyQuery &Q) {
  return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
}

/// Given operands for a GetElementPtrInst, see if we can fold the result.
4062 /// If not, this returns null. 4063 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 4064 const SimplifyQuery &Q, unsigned) { 4065 // The type of the GEP pointer operand. 4066 unsigned AS = 4067 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace(); 4068 4069 // getelementptr P -> P. 4070 if (Ops.size() == 1) 4071 return Ops[0]; 4072 4073 // Compute the (pointer) type returned by the GEP instruction. 4074 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1)); 4075 Type *GEPTy = PointerType::get(LastType, AS); 4076 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType())) 4077 GEPTy = VectorType::get(GEPTy, VT->getElementCount()); 4078 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType())) 4079 GEPTy = VectorType::get(GEPTy, VT->getElementCount()); 4080 4081 if (isa<UndefValue>(Ops[0])) 4082 return UndefValue::get(GEPTy); 4083 4084 bool IsScalableVec = 4085 SrcTy->isVectorTy() ? SrcTy->getVectorIsScalable() : false; 4086 4087 if (Ops.size() == 2) { 4088 // getelementptr P, 0 -> P. 4089 if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy) 4090 return Ops[0]; 4091 4092 Type *Ty = SrcTy; 4093 if (!IsScalableVec && Ty->isSized()) { 4094 Value *P; 4095 uint64_t C; 4096 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty); 4097 // getelementptr P, N -> P if P points to a type of zero size. 4098 if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy) 4099 return Ops[0]; 4100 4101 // The following transforms are only safe if the ptrtoint cast 4102 // doesn't truncate the pointers. 4103 if (Ops[1]->getType()->getScalarSizeInBits() == 4104 Q.DL.getPointerSizeInBits(AS)) { 4105 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * { 4106 if (match(P, m_Zero())) 4107 return Constant::getNullValue(GEPTy); 4108 Value *Temp; 4109 if (match(P, m_PtrToInt(m_Value(Temp)))) 4110 if (Temp->getType() == GEPTy) 4111 return Temp; 4112 return nullptr; 4113 }; 4114 4115 // getelementptr V, (sub P, V) -> P if P points to a type of size 1. 4116 if (TyAllocSize == 1 && 4117 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))))) 4118 if (Value *R = PtrToIntOrZero(P)) 4119 return R; 4120 4121 // getelementptr V, (ashr (sub P, V), C) -> Q 4122 // if P points to a type of size 1 << C. 4123 if (match(Ops[1], 4124 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 4125 m_ConstantInt(C))) && 4126 TyAllocSize == 1ULL << C) 4127 if (Value *R = PtrToIntOrZero(P)) 4128 return R; 4129 4130 // getelementptr V, (sdiv (sub P, V), C) -> Q 4131 // if P points to a type of size C. 
4132 if (match(Ops[1], 4133 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 4134 m_SpecificInt(TyAllocSize)))) 4135 if (Value *R = PtrToIntOrZero(P)) 4136 return R; 4137 } 4138 } 4139 } 4140 4141 if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 && 4142 all_of(Ops.slice(1).drop_back(1), 4143 [](Value *Idx) { return match(Idx, m_Zero()); })) { 4144 unsigned IdxWidth = 4145 Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace()); 4146 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) { 4147 APInt BasePtrOffset(IdxWidth, 0); 4148 Value *StrippedBasePtr = 4149 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL, 4150 BasePtrOffset); 4151 4152 // gep (gep V, C), (sub 0, V) -> C 4153 if (match(Ops.back(), 4154 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) { 4155 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset); 4156 return ConstantExpr::getIntToPtr(CI, GEPTy); 4157 } 4158 // gep (gep V, C), (xor V, -1) -> C-1 4159 if (match(Ops.back(), 4160 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) { 4161 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1); 4162 return ConstantExpr::getIntToPtr(CI, GEPTy); 4163 } 4164 } 4165 } 4166 4167 // Check to see if this is constant foldable. 4168 if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); })) 4169 return nullptr; 4170 4171 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]), 4172 Ops.slice(1)); 4173 return ConstantFoldConstant(CE, Q.DL); 4174 } 4175 4176 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 4177 const SimplifyQuery &Q) { 4178 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit); 4179 } 4180 4181 /// Given operands for an InsertValueInst, see if we can fold the result. 4182 /// If not, this returns null. 4183 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val, 4184 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q, 4185 unsigned) { 4186 if (Constant *CAgg = dyn_cast<Constant>(Agg)) 4187 if (Constant *CVal = dyn_cast<Constant>(Val)) 4188 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs); 4189 4190 // insertvalue x, undef, n -> x 4191 if (match(Val, m_Undef())) 4192 return Agg; 4193 4194 // insertvalue x, (extractvalue y, n), n 4195 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val)) 4196 if (EV->getAggregateOperand()->getType() == Agg->getType() && 4197 EV->getIndices() == Idxs) { 4198 // insertvalue undef, (extractvalue y, n), n -> y 4199 if (match(Agg, m_Undef())) 4200 return EV->getAggregateOperand(); 4201 4202 // insertvalue y, (extractvalue y, n), n -> y 4203 if (Agg == EV->getAggregateOperand()) 4204 return Agg; 4205 } 4206 4207 return nullptr; 4208 } 4209 4210 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val, 4211 ArrayRef<unsigned> Idxs, 4212 const SimplifyQuery &Q) { 4213 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit); 4214 } 4215 4216 Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx, 4217 const SimplifyQuery &Q) { 4218 // Try to constant fold. 4219 auto *VecC = dyn_cast<Constant>(Vec); 4220 auto *ValC = dyn_cast<Constant>(Val); 4221 auto *IdxC = dyn_cast<Constant>(Idx); 4222 if (VecC && ValC && IdxC) 4223 return ConstantFoldInsertElementInstruction(VecC, ValC, IdxC); 4224 4225 // For fixed-length vector, fold into undef if index is out of bounds. 
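  // For example, a <4 x i32> vector only has lanes 0..3, so
  //   insertelement <4 x i32> %v, i32 %x, i64 7
  // folds to undef.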
4226 if (auto *CI = dyn_cast<ConstantInt>(Idx)) { 4227 if (!Vec->getType()->getVectorIsScalable() && 4228 CI->uge(Vec->getType()->getVectorNumElements())) 4229 return UndefValue::get(Vec->getType()); 4230 } 4231 4232 // If index is undef, it might be out of bounds (see above case) 4233 if (isa<UndefValue>(Idx)) 4234 return UndefValue::get(Vec->getType()); 4235 4236 // Inserting an undef scalar? Assume it is the same value as the existing 4237 // vector element. 4238 if (isa<UndefValue>(Val)) 4239 return Vec; 4240 4241 // If we are extracting a value from a vector, then inserting it into the same 4242 // place, that's the input vector: 4243 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec 4244 if (match(Val, m_ExtractElement(m_Specific(Vec), m_Specific(Idx)))) 4245 return Vec; 4246 4247 return nullptr; 4248 } 4249 4250 /// Given operands for an ExtractValueInst, see if we can fold the result. 4251 /// If not, this returns null. 4252 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 4253 const SimplifyQuery &, unsigned) { 4254 if (auto *CAgg = dyn_cast<Constant>(Agg)) 4255 return ConstantFoldExtractValueInstruction(CAgg, Idxs); 4256 4257 // extractvalue x, (insertvalue y, elt, n), n -> elt 4258 unsigned NumIdxs = Idxs.size(); 4259 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr; 4260 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) { 4261 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices(); 4262 unsigned NumInsertValueIdxs = InsertValueIdxs.size(); 4263 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs); 4264 if (InsertValueIdxs.slice(0, NumCommonIdxs) == 4265 Idxs.slice(0, NumCommonIdxs)) { 4266 if (NumIdxs == NumInsertValueIdxs) 4267 return IVI->getInsertedValueOperand(); 4268 break; 4269 } 4270 } 4271 4272 return nullptr; 4273 } 4274 4275 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 4276 const SimplifyQuery &Q) { 4277 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit); 4278 } 4279 4280 /// Given operands for an ExtractElementInst, see if we can fold the result. 4281 /// If not, this returns null. 4282 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &, 4283 unsigned) { 4284 if (auto *CVec = dyn_cast<Constant>(Vec)) { 4285 if (auto *CIdx = dyn_cast<Constant>(Idx)) 4286 return ConstantFoldExtractElementInstruction(CVec, CIdx); 4287 4288 // The index is not relevant if our vector is a splat. 4289 if (auto *Splat = CVec->getSplatValue()) 4290 return Splat; 4291 4292 if (isa<UndefValue>(Vec)) 4293 return UndefValue::get(Vec->getType()->getVectorElementType()); 4294 } 4295 4296 // If extracting a specified index from the vector, see if we can recursively 4297 // find a previously computed scalar that was inserted into the vector. 4298 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) { 4299 // For fixed-length vector, fold into undef if index is out of bounds. 4300 if (!Vec->getType()->getVectorIsScalable() && 4301 IdxC->getValue().uge(Vec->getType()->getVectorNumElements())) 4302 return UndefValue::get(Vec->getType()->getVectorElementType()); 4303 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue())) 4304 return Elt; 4305 } 4306 4307 // An undef extract index can be arbitrarily chosen to be an out-of-range 4308 // index value, which would result in the instruction being undef. 
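  // For example:
  //   extractelement <4 x i32> %v, i64 undef
  // folds to undef.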
4309 if (isa<UndefValue>(Idx)) 4310 return UndefValue::get(Vec->getType()->getVectorElementType()); 4311 4312 return nullptr; 4313 } 4314 4315 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx, 4316 const SimplifyQuery &Q) { 4317 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit); 4318 } 4319 4320 /// See if we can fold the given phi. If not, returns null. 4321 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) { 4322 // If all of the PHI's incoming values are the same then replace the PHI node 4323 // with the common value. 4324 Value *CommonValue = nullptr; 4325 bool HasUndefInput = false; 4326 for (Value *Incoming : PN->incoming_values()) { 4327 // If the incoming value is the phi node itself, it can safely be skipped. 4328 if (Incoming == PN) continue; 4329 if (isa<UndefValue>(Incoming)) { 4330 // Remember that we saw an undef value, but otherwise ignore them. 4331 HasUndefInput = true; 4332 continue; 4333 } 4334 if (CommonValue && Incoming != CommonValue) 4335 return nullptr; // Not the same, bail out. 4336 CommonValue = Incoming; 4337 } 4338 4339 // If CommonValue is null then all of the incoming values were either undef or 4340 // equal to the phi node itself. 4341 if (!CommonValue) 4342 return UndefValue::get(PN->getType()); 4343 4344 // If we have a PHI node like phi(X, undef, X), where X is defined by some 4345 // instruction, we cannot return X as the result of the PHI node unless it 4346 // dominates the PHI block. 4347 if (HasUndefInput) 4348 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr; 4349 4350 return CommonValue; 4351 } 4352 4353 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op, 4354 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) { 4355 if (auto *C = dyn_cast<Constant>(Op)) 4356 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL); 4357 4358 if (auto *CI = dyn_cast<CastInst>(Op)) { 4359 auto *Src = CI->getOperand(0); 4360 Type *SrcTy = Src->getType(); 4361 Type *MidTy = CI->getType(); 4362 Type *DstTy = Ty; 4363 if (Src->getType() == Ty) { 4364 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode()); 4365 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc); 4366 Type *SrcIntPtrTy = 4367 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr; 4368 Type *MidIntPtrTy = 4369 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr; 4370 Type *DstIntPtrTy = 4371 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr; 4372 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy, 4373 SrcIntPtrTy, MidIntPtrTy, 4374 DstIntPtrTy) == Instruction::BitCast) 4375 return Src; 4376 } 4377 } 4378 4379 // bitcast x -> x 4380 if (CastOpc == Instruction::BitCast) 4381 if (Op->getType() == Ty) 4382 return Op; 4383 4384 return nullptr; 4385 } 4386 4387 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, 4388 const SimplifyQuery &Q) { 4389 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit); 4390 } 4391 4392 /// For the given destination element of a shuffle, peek through shuffles to 4393 /// match a root vector source operand that contains that element in the same 4394 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s). 4395 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, 4396 int MaskVal, Value *RootVec, 4397 unsigned MaxRecurse) { 4398 if (!MaxRecurse--) 4399 return nullptr; 4400 4401 // Bail out if any mask value is undefined. 
That kind of shuffle may be 4402 // simplified further based on demanded bits or other folds. 4403 if (MaskVal == -1) 4404 return nullptr; 4405 4406 // The mask value chooses which source operand we need to look at next. 4407 int InVecNumElts = Op0->getType()->getVectorNumElements(); 4408 int RootElt = MaskVal; 4409 Value *SourceOp = Op0; 4410 if (MaskVal >= InVecNumElts) { 4411 RootElt = MaskVal - InVecNumElts; 4412 SourceOp = Op1; 4413 } 4414 4415 // If the source operand is a shuffle itself, look through it to find the 4416 // matching root vector. 4417 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) { 4418 return foldIdentityShuffles( 4419 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1), 4420 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse); 4421 } 4422 4423 // TODO: Look through bitcasts? What if the bitcast changes the vector element 4424 // size? 4425 4426 // The source operand is not a shuffle. Initialize the root vector value for 4427 // this shuffle if that has not been done yet. 4428 if (!RootVec) 4429 RootVec = SourceOp; 4430 4431 // Give up as soon as a source operand does not match the existing root value. 4432 if (RootVec != SourceOp) 4433 return nullptr; 4434 4435 // The element must be coming from the same lane in the source vector 4436 // (although it may have crossed lanes in intermediate shuffles). 4437 if (RootElt != DestElt) 4438 return nullptr; 4439 4440 return RootVec; 4441 } 4442 4443 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, 4444 Type *RetTy, const SimplifyQuery &Q, 4445 unsigned MaxRecurse) { 4446 if (isa<UndefValue>(Mask)) 4447 return UndefValue::get(RetTy); 4448 4449 Type *InVecTy = Op0->getType(); 4450 ElementCount MaskEltCount = Mask->getType()->getVectorElementCount(); 4451 ElementCount InVecEltCount = InVecTy->getVectorElementCount(); 4452 4453 assert(MaskEltCount.Scalable == InVecEltCount.Scalable && 4454 "vscale mismatch between input vector and mask"); 4455 4456 bool Scalable = MaskEltCount.Scalable; 4457 4458 SmallVector<int, 32> Indices; 4459 if (!Scalable) { 4460 ShuffleVectorInst::getShuffleMask(Mask, Indices); 4461 assert(MaskEltCount.Min == Indices.size() && 4462 "Size of Indices not same as number of mask elements?"); 4463 } 4464 4465 if (!Scalable) { 4466 // Canonicalization: If mask does not select elements from an input vector, 4467 // replace that input vector with undef. 4468 bool MaskSelects0 = false, MaskSelects1 = false; 4469 for (unsigned i = 0; i != MaskEltCount.Min; ++i) { 4470 if (Indices[i] == -1) 4471 continue; 4472 if ((unsigned)Indices[i] < InVecEltCount.Min) 4473 MaskSelects0 = true; 4474 else 4475 MaskSelects1 = true; 4476 } 4477 if (!MaskSelects0) 4478 Op0 = UndefValue::get(InVecTy); 4479 if (!MaskSelects1) 4480 Op1 = UndefValue::get(InVecTy); 4481 } 4482 4483 auto *Op0Const = dyn_cast<Constant>(Op0); 4484 auto *Op1Const = dyn_cast<Constant>(Op1); 4485 4486 // If all operands are constant, constant fold the shuffle. This 4487 // transformation depends on the value of the mask which is not known at 4488 // compile time for scalable vectors 4489 if (!Scalable && Op0Const && Op1Const) 4490 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask); 4491 4492 // Canonicalization: if only one input vector is constant, it shall be the 4493 // second one. 
// This transformation depends on the value of the mask which
  // is not known at compile time for scalable vectors.
  if (!Scalable && Op0Const && !Op1Const) {
    std::swap(Op0, Op1);
    ShuffleVectorInst::commuteShuffleMask(Indices, InVecEltCount.Min);
  }

  // A splat of an inserted scalar constant becomes a vector constant:
  // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
  // NOTE: We may have commuted above, so analyze the updated Indices, not the
  // original mask constant.
  // NOTE: This transformation depends on the value of the mask which is not
  // known at compile time for scalable vectors.
  Constant *C;
  ConstantInt *IndexC;
  if (!Scalable && match(Op0, m_InsertElement(m_Value(), m_Constant(C),
                                              m_ConstantInt(IndexC)))) {
    // Match a splat shuffle mask of the insert index allowing undef elements.
    int InsertIndex = IndexC->getZExtValue();
    if (all_of(Indices, [InsertIndex](int MaskElt) {
          return MaskElt == InsertIndex || MaskElt == -1;
        })) {
      assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");

      // Shuffle mask undefs become undefined constant result elements.
      SmallVector<Constant *, 16> VecC(MaskEltCount.Min, C);
      for (unsigned i = 0; i != MaskEltCount.Min; ++i)
        if (Indices[i] == -1)
          VecC[i] = UndefValue::get(C->getType());
      return ConstantVector::get(VecC);
    }
  }

  // A shuffle of a splat is always the splat itself. Legal if the shuffle's
  // value type is the same as the input vectors' type.
  if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
    if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
        OpShuf->getMask()->getSplatValue())
      return Op0;

  // All remaining transformations depend on the value of the mask, which is
  // not known at compile time for scalable vectors.
  if (Scalable)
    return nullptr;

  // Don't fold a shuffle with undef mask elements. This may get folded in a
  // better way using demanded bits or other analysis.
  // TODO: Should we allow this?
  if (find(Indices, -1) != Indices.end())
    return nullptr;

  // Check if every element of this shuffle can be mapped back to the
  // corresponding element of a single root vector. If so, we don't need this
  // shuffle. This handles simple identity shuffles as well as chains of
  // shuffles that may widen/narrow and/or move elements across lanes and back.
  Value *RootVec = nullptr;
  for (unsigned i = 0; i != MaskEltCount.Min; ++i) {
    // Note that recursion is limited for each vector element, so if any element
    // exceeds the limit, this will fail to simplify.
    RootVec =
        foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);

    // We can't replace a widening/narrowing shuffle with one of its operands.
    if (!RootVec || RootVec->getType() != RetTy)
      return nullptr;
  }
  return RootVec;
}

/// Given operands for a ShuffleVectorInst, fold the result or return null.
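/// For example, the identity shuffle
///   shufflevector <4 x i32> %v, <4 x i32> undef,
///                 <4 x i32> <i32 0, i32 1, i32 2, i32 3>
/// folds to %v via foldIdentityShuffles.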
4563 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, 4564 Type *RetTy, const SimplifyQuery &Q) { 4565 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit); 4566 } 4567 4568 static Constant *foldConstant(Instruction::UnaryOps Opcode, 4569 Value *&Op, const SimplifyQuery &Q) { 4570 if (auto *C = dyn_cast<Constant>(Op)) 4571 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL); 4572 return nullptr; 4573 } 4574 4575 /// Given the operand for an FNeg, see if we can fold the result. If not, this 4576 /// returns null. 4577 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, 4578 const SimplifyQuery &Q, unsigned MaxRecurse) { 4579 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q)) 4580 return C; 4581 4582 Value *X; 4583 // fneg (fneg X) ==> X 4584 if (match(Op, m_FNeg(m_Value(X)))) 4585 return X; 4586 4587 return nullptr; 4588 } 4589 4590 Value *llvm::SimplifyFNegInst(Value *Op, FastMathFlags FMF, 4591 const SimplifyQuery &Q) { 4592 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit); 4593 } 4594 4595 static Constant *propagateNaN(Constant *In) { 4596 // If the input is a vector with undef elements, just return a default NaN. 4597 if (!In->isNaN()) 4598 return ConstantFP::getNaN(In->getType()); 4599 4600 // Propagate the existing NaN constant when possible. 4601 // TODO: Should we quiet a signaling NaN? 4602 return In; 4603 } 4604 4605 /// Perform folds that are common to any floating-point operation. This implies 4606 /// transforms based on undef/NaN because the operation itself makes no 4607 /// difference to the result. 4608 static Constant *simplifyFPOp(ArrayRef<Value *> Ops, 4609 FastMathFlags FMF = FastMathFlags()) { 4610 for (Value *V : Ops) { 4611 bool IsNan = match(V, m_NaN()); 4612 bool IsInf = match(V, m_Inf()); 4613 bool IsUndef = match(V, m_Undef()); 4614 4615 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand 4616 // (an undef operand can be chosen to be Nan/Inf), then the result of 4617 // this operation is poison. That result can be relaxed to undef. 4618 if (FMF.noNaNs() && (IsNan || IsUndef)) 4619 return UndefValue::get(V->getType()); 4620 if (FMF.noInfs() && (IsInf || IsUndef)) 4621 return UndefValue::get(V->getType()); 4622 4623 if (IsUndef || IsNan) 4624 return propagateNaN(cast<Constant>(V)); 4625 } 4626 return nullptr; 4627 } 4628 4629 /// Given operands for an FAdd, see if we can fold the result. If not, this 4630 /// returns null. 4631 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4632 const SimplifyQuery &Q, unsigned MaxRecurse) { 4633 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q)) 4634 return C; 4635 4636 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF)) 4637 return C; 4638 4639 // fadd X, -0 ==> X 4640 if (match(Op1, m_NegZeroFP())) 4641 return Op0; 4642 4643 // fadd X, 0 ==> X, when we know X is not -0 4644 if (match(Op1, m_PosZeroFP()) && 4645 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI))) 4646 return Op0; 4647 4648 // With nnan: -X + X --> 0.0 (and commuted variant) 4649 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN. 
4650 // Negative zeros are allowed because we always end up with positive zero: 4651 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 4652 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 4653 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0 4654 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0 4655 if (FMF.noNaNs()) { 4656 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) || 4657 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0)))) 4658 return ConstantFP::getNullValue(Op0->getType()); 4659 4660 if (match(Op0, m_FNeg(m_Specific(Op1))) || 4661 match(Op1, m_FNeg(m_Specific(Op0)))) 4662 return ConstantFP::getNullValue(Op0->getType()); 4663 } 4664 4665 // (X - Y) + Y --> X 4666 // Y + (X - Y) --> X 4667 Value *X; 4668 if (FMF.noSignedZeros() && FMF.allowReassoc() && 4669 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) || 4670 match(Op1, m_FSub(m_Value(X), m_Specific(Op0))))) 4671 return X; 4672 4673 return nullptr; 4674 } 4675 4676 /// Given operands for an FSub, see if we can fold the result. If not, this 4677 /// returns null. 4678 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4679 const SimplifyQuery &Q, unsigned MaxRecurse) { 4680 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q)) 4681 return C; 4682 4683 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF)) 4684 return C; 4685 4686 // fsub X, +0 ==> X 4687 if (match(Op1, m_PosZeroFP())) 4688 return Op0; 4689 4690 // fsub X, -0 ==> X, when we know X is not -0 4691 if (match(Op1, m_NegZeroFP()) && 4692 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI))) 4693 return Op0; 4694 4695 // fsub -0.0, (fsub -0.0, X) ==> X 4696 // fsub -0.0, (fneg X) ==> X 4697 Value *X; 4698 if (match(Op0, m_NegZeroFP()) && 4699 match(Op1, m_FNeg(m_Value(X)))) 4700 return X; 4701 4702 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored. 4703 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored. 4704 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) && 4705 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) || 4706 match(Op1, m_FNeg(m_Value(X))))) 4707 return X; 4708 4709 // fsub nnan x, x ==> 0.0 4710 if (FMF.noNaNs() && Op0 == Op1) 4711 return Constant::getNullValue(Op0->getType()); 4712 4713 // Y - (Y - X) --> X 4714 // (X + Y) - Y --> X 4715 if (FMF.noSignedZeros() && FMF.allowReassoc() && 4716 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) || 4717 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X))))) 4718 return X; 4719 4720 return nullptr; 4721 } 4722 4723 static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, 4724 const SimplifyQuery &Q, unsigned MaxRecurse) { 4725 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF)) 4726 return C; 4727 4728 // fmul X, 1.0 ==> X 4729 if (match(Op1, m_FPOne())) 4730 return Op0; 4731 4732 // fmul 1.0, X ==> X 4733 if (match(Op0, m_FPOne())) 4734 return Op1; 4735 4736 // fmul nnan nsz X, 0 ==> 0 4737 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP())) 4738 return ConstantFP::getNullValue(Op0->getType()); 4739 4740 // fmul nnan nsz 0, X ==> 0 4741 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP())) 4742 return ConstantFP::getNullValue(Op1->getType()); 4743 4744 // sqrt(X) * sqrt(X) --> X, if we can: 4745 // 1. Remove the intermediate rounding (reassociate). 4746 // 2. Ignore non-zero negative numbers because sqrt would produce NAN. 4747 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0. 
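  // For illustration, the shape of IR this matches (assumed example; %s, %m,
  // and %x are hypothetical names):
  //   %s = call reassoc nnan nsz double @llvm.sqrt.f64(double %x)
  //   %m = fmul reassoc nnan nsz double %s, %s
  //   ; %m simplifies to %x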
4748 Value *X; 4749 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && 4750 FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros()) 4751 return X; 4752 4753 return nullptr; 4754 } 4755 4756 /// Given the operands for an FMul, see if we can fold the result 4757 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4758 const SimplifyQuery &Q, unsigned MaxRecurse) { 4759 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q)) 4760 return C; 4761 4762 // Now apply simplifications that do not require rounding. 4763 return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse); 4764 } 4765 4766 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4767 const SimplifyQuery &Q) { 4768 return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit); 4769 } 4770 4771 4772 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4773 const SimplifyQuery &Q) { 4774 return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit); 4775 } 4776 4777 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4778 const SimplifyQuery &Q) { 4779 return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit); 4780 } 4781 4782 Value *llvm::SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, 4783 const SimplifyQuery &Q) { 4784 return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit); 4785 } 4786 4787 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4788 const SimplifyQuery &Q, unsigned) { 4789 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q)) 4790 return C; 4791 4792 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF)) 4793 return C; 4794 4795 // X / 1.0 -> X 4796 if (match(Op1, m_FPOne())) 4797 return Op0; 4798 4799 // 0 / X -> 0 4800 // Requires that NaNs are off (X could be zero) and signed zeroes are 4801 // ignored (X could be positive or negative, so the output sign is unknown). 4802 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP())) 4803 return ConstantFP::getNullValue(Op0->getType()); 4804 4805 if (FMF.noNaNs()) { 4806 // X / X -> 1.0 is legal when NaNs are ignored. 4807 // We can ignore infinities because INF/INF is NaN. 4808 if (Op0 == Op1) 4809 return ConstantFP::get(Op0->getType(), 1.0); 4810 4811 // (X * Y) / Y --> X if we can reassociate to the above form. 4812 Value *X; 4813 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1)))) 4814 return X; 4815 4816 // -X / X -> -1.0 and 4817 // X / -X -> -1.0 are legal when NaNs are ignored. 4818 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored. 4819 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) || 4820 match(Op1, m_FNegNSZ(m_Specific(Op0)))) 4821 return ConstantFP::get(Op0->getType(), -1.0); 4822 } 4823 4824 return nullptr; 4825 } 4826 4827 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4828 const SimplifyQuery &Q) { 4829 return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit); 4830 } 4831 4832 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4833 const SimplifyQuery &Q, unsigned) { 4834 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q)) 4835 return C; 4836 4837 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF)) 4838 return C; 4839 4840 // Unlike fdiv, the result of frem always matches the sign of the dividend. 4841 // The constant match may include undef elements in a vector, so return a full 4842 // zero constant as the result. 
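  // For illustration (assumed example): a vector dividend such as
  //   <2 x float> <float 0.0, float undef>
  // still matches m_PosZeroFP below, so we return a full zeroinitializer
  // rather than reusing a constant that carries undef lanes.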
4843 if (FMF.noNaNs()) { 4844 // +0 % X -> 0 4845 if (match(Op0, m_PosZeroFP())) 4846 return ConstantFP::getNullValue(Op0->getType()); 4847 // -0 % X -> -0 4848 if (match(Op0, m_NegZeroFP())) 4849 return ConstantFP::getNegativeZero(Op0->getType()); 4850 } 4851 4852 return nullptr; 4853 } 4854 4855 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4856 const SimplifyQuery &Q) { 4857 return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit); 4858 } 4859 4860 //=== Helper functions for higher up the class hierarchy. 4861 4862 /// Given the operand for a UnaryOperator, see if we can fold the result. 4863 /// If not, this returns null. 4864 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q, 4865 unsigned MaxRecurse) { 4866 switch (Opcode) { 4867 case Instruction::FNeg: 4868 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse); 4869 default: 4870 llvm_unreachable("Unexpected opcode"); 4871 } 4872 } 4873 4874 /// Given the operand for a UnaryOperator, see if we can fold the result. 4875 /// If not, this returns null. 4876 /// Try to use FastMathFlags when folding the result. 4877 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op, 4878 const FastMathFlags &FMF, 4879 const SimplifyQuery &Q, unsigned MaxRecurse) { 4880 switch (Opcode) { 4881 case Instruction::FNeg: 4882 return simplifyFNegInst(Op, FMF, Q, MaxRecurse); 4883 default: 4884 return simplifyUnOp(Opcode, Op, Q, MaxRecurse); 4885 } 4886 } 4887 4888 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) { 4889 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit); 4890 } 4891 4892 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF, 4893 const SimplifyQuery &Q) { 4894 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit); 4895 } 4896 4897 /// Given operands for a BinaryOperator, see if we can fold the result. 4898 /// If not, this returns null. 
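/// For illustration (hypothetical call, not from a caller in this file):
/// SimplifyBinOp(Instruction::Xor, X, X, Q, RecursionLimit) dispatches to
/// SimplifyXorInst, which can fold the result to zero.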
4899 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4900 const SimplifyQuery &Q, unsigned MaxRecurse) { 4901 switch (Opcode) { 4902 case Instruction::Add: 4903 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse); 4904 case Instruction::Sub: 4905 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse); 4906 case Instruction::Mul: 4907 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse); 4908 case Instruction::SDiv: 4909 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse); 4910 case Instruction::UDiv: 4911 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse); 4912 case Instruction::SRem: 4913 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse); 4914 case Instruction::URem: 4915 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse); 4916 case Instruction::Shl: 4917 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse); 4918 case Instruction::LShr: 4919 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse); 4920 case Instruction::AShr: 4921 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse); 4922 case Instruction::And: 4923 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse); 4924 case Instruction::Or: 4925 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse); 4926 case Instruction::Xor: 4927 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse); 4928 case Instruction::FAdd: 4929 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4930 case Instruction::FSub: 4931 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4932 case Instruction::FMul: 4933 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4934 case Instruction::FDiv: 4935 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4936 case Instruction::FRem: 4937 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4938 default: 4939 llvm_unreachable("Unexpected opcode"); 4940 } 4941 } 4942 4943 /// Given operands for a BinaryOperator, see if we can fold the result. 4944 /// If not, this returns null. 4945 /// Try to use FastMathFlags when folding the result. 4946 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4947 const FastMathFlags &FMF, const SimplifyQuery &Q, 4948 unsigned MaxRecurse) { 4949 switch (Opcode) { 4950 case Instruction::FAdd: 4951 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse); 4952 case Instruction::FSub: 4953 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse); 4954 case Instruction::FMul: 4955 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse); 4956 case Instruction::FDiv: 4957 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse); 4958 default: 4959 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse); 4960 } 4961 } 4962 4963 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4964 const SimplifyQuery &Q) { 4965 return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit); 4966 } 4967 4968 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4969 FastMathFlags FMF, const SimplifyQuery &Q) { 4970 return ::SimplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit); 4971 } 4972 4973 /// Given operands for a CmpInst, see if we can fold the result. 
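/// Integer predicates (e.g. ICMP_EQ) route to SimplifyICmpInst;
/// floating-point predicates (e.g. FCMP_OEQ) route to SimplifyFCmpInst with
/// empty fast-math flags.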
4974 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4975 const SimplifyQuery &Q, unsigned MaxRecurse) { 4976 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate)) 4977 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse); 4978 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4979 } 4980 4981 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4982 const SimplifyQuery &Q) { 4983 return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit); 4984 } 4985 4986 static bool IsIdempotent(Intrinsic::ID ID) { 4987 switch (ID) { 4988 default: return false; 4989 4990 // Unary idempotent: f(f(x)) = f(x) 4991 case Intrinsic::fabs: 4992 case Intrinsic::floor: 4993 case Intrinsic::ceil: 4994 case Intrinsic::trunc: 4995 case Intrinsic::rint: 4996 case Intrinsic::nearbyint: 4997 case Intrinsic::round: 4998 case Intrinsic::canonicalize: 4999 return true; 5000 } 5001 } 5002 5003 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset, 5004 const DataLayout &DL) { 5005 GlobalValue *PtrSym; 5006 APInt PtrOffset; 5007 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL)) 5008 return nullptr; 5009 5010 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext()); 5011 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext()); 5012 Type *Int32PtrTy = Int32Ty->getPointerTo(); 5013 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext()); 5014 5015 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset); 5016 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64) 5017 return nullptr; 5018 5019 uint64_t OffsetInt = OffsetConstInt->getSExtValue(); 5020 if (OffsetInt % 4 != 0) 5021 return nullptr; 5022 5023 Constant *C = ConstantExpr::getGetElementPtr( 5024 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy), 5025 ConstantInt::get(Int64Ty, OffsetInt / 4)); 5026 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL); 5027 if (!Loaded) 5028 return nullptr; 5029 5030 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded); 5031 if (!LoadedCE) 5032 return nullptr; 5033 5034 if (LoadedCE->getOpcode() == Instruction::Trunc) { 5035 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 5036 if (!LoadedCE) 5037 return nullptr; 5038 } 5039 5040 if (LoadedCE->getOpcode() != Instruction::Sub) 5041 return nullptr; 5042 5043 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 5044 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt) 5045 return nullptr; 5046 auto *LoadedLHSPtr = LoadedLHS->getOperand(0); 5047 5048 Constant *LoadedRHS = LoadedCE->getOperand(1); 5049 GlobalValue *LoadedRHSSym; 5050 APInt LoadedRHSOffset; 5051 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset, 5052 DL) || 5053 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset) 5054 return nullptr; 5055 5056 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy); 5057 } 5058 5059 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0, 5060 const SimplifyQuery &Q) { 5061 // Idempotent functions return the same result when called repeatedly. 
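  // For illustration (assumed IR; %a, %b, %x are hypothetical names):
  //   %a = call float @llvm.fabs.f32(float %x)
  //   %b = call float @llvm.fabs.f32(float %a)
  //   ; %b simplifies to %a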
5062 Intrinsic::ID IID = F->getIntrinsicID(); 5063 if (IsIdempotent(IID)) 5064 if (auto *II = dyn_cast<IntrinsicInst>(Op0)) 5065 if (II->getIntrinsicID() == IID) 5066 return II; 5067 5068 Value *X; 5069 switch (IID) { 5070 case Intrinsic::fabs: 5071 if (SignBitMustBeZero(Op0, Q.TLI)) return Op0; 5072 break; 5073 case Intrinsic::bswap: 5074 // bswap(bswap(x)) -> x 5075 if (match(Op0, m_BSwap(m_Value(X)))) return X; 5076 break; 5077 case Intrinsic::bitreverse: 5078 // bitreverse(bitreverse(x)) -> x 5079 if (match(Op0, m_BitReverse(m_Value(X)))) return X; 5080 break; 5081 case Intrinsic::exp: 5082 // exp(log(x)) -> x 5083 if (Q.CxtI->hasAllowReassoc() && 5084 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X)))) return X; 5085 break; 5086 case Intrinsic::exp2: 5087 // exp2(log2(x)) -> x 5088 if (Q.CxtI->hasAllowReassoc() && 5089 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X)))) return X; 5090 break; 5091 case Intrinsic::log: 5092 // log(exp(x)) -> x 5093 if (Q.CxtI->hasAllowReassoc() && 5094 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X)))) return X; 5095 break; 5096 case Intrinsic::log2: 5097 // log2(exp2(x)) -> x 5098 if (Q.CxtI->hasAllowReassoc() && 5099 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) || 5100 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), 5101 m_Value(X))))) return X; 5102 break; 5103 case Intrinsic::log10: 5104 // log10(pow(10.0, x)) -> x 5105 if (Q.CxtI->hasAllowReassoc() && 5106 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), 5107 m_Value(X)))) return X; 5108 break; 5109 case Intrinsic::floor: 5110 case Intrinsic::trunc: 5111 case Intrinsic::ceil: 5112 case Intrinsic::round: 5113 case Intrinsic::nearbyint: 5114 case Intrinsic::rint: { 5115 // floor (sitofp x) -> sitofp x 5116 // floor (uitofp x) -> uitofp x 5117 // 5118 // Converting from int always results in a finite integral number or 5119 // infinity. For either of those inputs, these rounding functions always 5120 // return the same value, so the rounding can be eliminated. 
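    // For illustration (assumed IR; %f, %r, %i are hypothetical names):
    //   %f = sitofp i32 %i to float
    //   %r = call float @llvm.floor.f32(float %f)
    //   ; %r simplifies to %f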
5121 if (match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value()))) 5122 return Op0; 5123 break; 5124 } 5125 default: 5126 break; 5127 } 5128 5129 return nullptr; 5130 } 5131 5132 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1, 5133 const SimplifyQuery &Q) { 5134 Intrinsic::ID IID = F->getIntrinsicID(); 5135 Type *ReturnType = F->getReturnType(); 5136 switch (IID) { 5137 case Intrinsic::usub_with_overflow: 5138 case Intrinsic::ssub_with_overflow: 5139 // X - X -> { 0, false } 5140 if (Op0 == Op1) 5141 return Constant::getNullValue(ReturnType); 5142 LLVM_FALLTHROUGH; 5143 case Intrinsic::uadd_with_overflow: 5144 case Intrinsic::sadd_with_overflow: 5145 // X - undef -> { undef, false } 5146 // undef - X -> { undef, false } 5147 // X + undef -> { undef, false } 5148 // undef + x -> { undef, false } 5149 if (isa<UndefValue>(Op0) || isa<UndefValue>(Op1)) { 5150 return ConstantStruct::get( 5151 cast<StructType>(ReturnType), 5152 {UndefValue::get(ReturnType->getStructElementType(0)), 5153 Constant::getNullValue(ReturnType->getStructElementType(1))}); 5154 } 5155 break; 5156 case Intrinsic::umul_with_overflow: 5157 case Intrinsic::smul_with_overflow: 5158 // 0 * X -> { 0, false } 5159 // X * 0 -> { 0, false } 5160 if (match(Op0, m_Zero()) || match(Op1, m_Zero())) 5161 return Constant::getNullValue(ReturnType); 5162 // undef * X -> { 0, false } 5163 // X * undef -> { 0, false } 5164 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 5165 return Constant::getNullValue(ReturnType); 5166 break; 5167 case Intrinsic::uadd_sat: 5168 // sat(MAX + X) -> MAX 5169 // sat(X + MAX) -> MAX 5170 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes())) 5171 return Constant::getAllOnesValue(ReturnType); 5172 LLVM_FALLTHROUGH; 5173 case Intrinsic::sadd_sat: 5174 // sat(X + undef) -> -1 5175 // sat(undef + X) -> -1 5176 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1). 5177 // For signed: Assume undef is ~X, in which case X + ~X = -1. 
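    // For illustration (assumed IR):
    //   %r = call i8 @llvm.sadd.sat.i8(i8 %x, i8 undef)
    //   ; %r simplifies to i8 -1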
5178 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 5179 return Constant::getAllOnesValue(ReturnType); 5180 5181 // X + 0 -> X 5182 if (match(Op1, m_Zero())) 5183 return Op0; 5184 // 0 + X -> X 5185 if (match(Op0, m_Zero())) 5186 return Op1; 5187 break; 5188 case Intrinsic::usub_sat: 5189 // sat(0 - X) -> 0, sat(X - MAX) -> 0 5190 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes())) 5191 return Constant::getNullValue(ReturnType); 5192 LLVM_FALLTHROUGH; 5193 case Intrinsic::ssub_sat: 5194 // X - X -> 0, X - undef -> 0, undef - X -> 0 5195 if (Op0 == Op1 || match(Op0, m_Undef()) || match(Op1, m_Undef())) 5196 return Constant::getNullValue(ReturnType); 5197 // X - 0 -> X 5198 if (match(Op1, m_Zero())) 5199 return Op0; 5200 break; 5201 case Intrinsic::load_relative: 5202 if (auto *C0 = dyn_cast<Constant>(Op0)) 5203 if (auto *C1 = dyn_cast<Constant>(Op1)) 5204 return SimplifyRelativeLoad(C0, C1, Q.DL); 5205 break; 5206 case Intrinsic::powi: 5207 if (auto *Power = dyn_cast<ConstantInt>(Op1)) { 5208 // powi(x, 0) -> 1.0 5209 if (Power->isZero()) 5210 return ConstantFP::get(Op0->getType(), 1.0); 5211 // powi(x, 1) -> x 5212 if (Power->isOne()) 5213 return Op0; 5214 } 5215 break; 5216 case Intrinsic::copysign: 5217 // copysign X, X --> X 5218 if (Op0 == Op1) 5219 return Op0; 5220 // copysign -X, X --> X 5221 // copysign X, -X --> -X 5222 if (match(Op0, m_FNeg(m_Specific(Op1))) || 5223 match(Op1, m_FNeg(m_Specific(Op0)))) 5224 return Op1; 5225 break; 5226 case Intrinsic::maxnum: 5227 case Intrinsic::minnum: 5228 case Intrinsic::maximum: 5229 case Intrinsic::minimum: { 5230 // If the arguments are the same, this is a no-op. 5231 if (Op0 == Op1) return Op0; 5232 5233 // If one argument is undef, return the other argument. 5234 if (match(Op0, m_Undef())) 5235 return Op1; 5236 if (match(Op1, m_Undef())) 5237 return Op0; 5238 5239 // If one argument is NaN, return other or NaN appropriately. 5240 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum; 5241 if (match(Op0, m_NaN())) 5242 return PropagateNaN ? Op0 : Op1; 5243 if (match(Op1, m_NaN())) 5244 return PropagateNaN ? Op1 : Op0; 5245 5246 // Min/max of the same operation with common operand: 5247 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants) 5248 if (auto *M0 = dyn_cast<IntrinsicInst>(Op0)) 5249 if (M0->getIntrinsicID() == IID && 5250 (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1)) 5251 return Op0; 5252 if (auto *M1 = dyn_cast<IntrinsicInst>(Op1)) 5253 if (M1->getIntrinsicID() == IID && 5254 (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0)) 5255 return Op1; 5256 5257 // min(X, -Inf) --> -Inf (and commuted variant) 5258 // max(X, +Inf) --> +Inf (and commuted variant) 5259 bool UseNegInf = IID == Intrinsic::minnum || IID == Intrinsic::minimum; 5260 const APFloat *C; 5261 if ((match(Op0, m_APFloat(C)) && C->isInfinity() && 5262 C->isNegative() == UseNegInf) || 5263 (match(Op1, m_APFloat(C)) && C->isInfinity() && 5264 C->isNegative() == UseNegInf)) 5265 return ConstantFP::getInfinity(ReturnType, UseNegInf); 5266 5267 // TODO: minnum(nnan x, inf) -> x 5268 // TODO: minnum(nnan ninf x, flt_max) -> x 5269 // TODO: maxnum(nnan x, -inf) -> x 5270 // TODO: maxnum(nnan ninf x, -flt_max) -> x 5271 break; 5272 } 5273 default: 5274 break; 5275 } 5276 5277 return nullptr; 5278 } 5279 5280 static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) { 5281 5282 // Intrinsics with no operands have some kind of side effect. Don't simplify. 
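  // (For example, @llvm.readcyclecounter takes no arguments yet reads
  // processor state, so it must not be folded away.)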
5283 unsigned NumOperands = Call->getNumArgOperands(); 5284 if (!NumOperands) 5285 return nullptr; 5286 5287 Function *F = cast<Function>(Call->getCalledFunction()); 5288 Intrinsic::ID IID = F->getIntrinsicID(); 5289 if (NumOperands == 1) 5290 return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q); 5291 5292 if (NumOperands == 2) 5293 return simplifyBinaryIntrinsic(F, Call->getArgOperand(0), 5294 Call->getArgOperand(1), Q); 5295 5296 // Handle intrinsics with 3 or more arguments. 5297 switch (IID) { 5298 case Intrinsic::masked_load: 5299 case Intrinsic::masked_gather: { 5300 Value *MaskArg = Call->getArgOperand(2); 5301 Value *PassthruArg = Call->getArgOperand(3); 5302 // If the mask is all zeros or undef, the "passthru" argument is the result. 5303 if (maskIsAllZeroOrUndef(MaskArg)) 5304 return PassthruArg; 5305 return nullptr; 5306 } 5307 case Intrinsic::fshl: 5308 case Intrinsic::fshr: { 5309 Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1), 5310 *ShAmtArg = Call->getArgOperand(2); 5311 5312 // If both operands are undef, the result is undef. 5313 if (match(Op0, m_Undef()) && match(Op1, m_Undef())) 5314 return UndefValue::get(F->getReturnType()); 5315 5316 // If shift amount is undef, assume it is zero. 5317 if (match(ShAmtArg, m_Undef())) 5318 return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1); 5319 5320 const APInt *ShAmtC; 5321 if (match(ShAmtArg, m_APInt(ShAmtC))) { 5322 // If there's effectively no shift, return the 1st arg or 2nd arg. 5323 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth()); 5324 if (ShAmtC->urem(BitWidth).isNullValue()) 5325 return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1); 5326 } 5327 return nullptr; 5328 } 5329 case Intrinsic::fma: 5330 case Intrinsic::fmuladd: { 5331 Value *Op0 = Call->getArgOperand(0); 5332 Value *Op1 = Call->getArgOperand(1); 5333 Value *Op2 = Call->getArgOperand(2); 5334 if (Value *V = simplifyFPOp({ Op0, Op1, Op2 })) 5335 return V; 5336 return nullptr; 5337 } 5338 default: 5339 return nullptr; 5340 } 5341 } 5342 5343 Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) { 5344 Value *Callee = Call->getCalledValue(); 5345 5346 // musttail calls can only be simplified if they are also DCEd. 5347 // As we can't guarantee this here, don't simplify them. 5348 if (Call->isMustTailCall()) 5349 return nullptr; 5350 5351 // call undef -> undef 5352 // call null -> undef 5353 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee)) 5354 return UndefValue::get(Call->getType()); 5355 5356 Function *F = dyn_cast<Function>(Callee); 5357 if (!F) 5358 return nullptr; 5359 5360 if (F->isIntrinsic()) 5361 if (Value *Ret = simplifyIntrinsic(Call, Q)) 5362 return Ret; 5363 5364 if (!canConstantFoldCallTo(Call, F)) 5365 return nullptr; 5366 5367 SmallVector<Constant *, 4> ConstantArgs; 5368 unsigned NumArgs = Call->getNumArgOperands(); 5369 ConstantArgs.reserve(NumArgs); 5370 for (auto &Arg : Call->args()) { 5371 Constant *C = dyn_cast<Constant>(&Arg); 5372 if (!C) 5373 return nullptr; 5374 ConstantArgs.push_back(C); 5375 } 5376 5377 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI); 5378 } 5379 5380 /// Given operands for a Freeze, see if we can fold the result. 5381 static Value *SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) { 5382 // Use a utility function defined in ValueTracking. 5383 if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.CxtI, Q.DT)) 5384 return Op0; 5385 // We have room for improvement. 
5386 return nullptr; 5387 } 5388 5389 Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) { 5390 return ::SimplifyFreezeInst(Op0, Q); 5391 } 5392 5393 /// See if we can compute a simplified version of this instruction. 5394 /// If not, this returns null. 5395 5396 Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ, 5397 OptimizationRemarkEmitter *ORE) { 5398 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I); 5399 Value *Result; 5400 5401 switch (I->getOpcode()) { 5402 default: 5403 Result = ConstantFoldInstruction(I, Q.DL, Q.TLI); 5404 break; 5405 case Instruction::FNeg: 5406 Result = SimplifyFNegInst(I->getOperand(0), I->getFastMathFlags(), Q); 5407 break; 5408 case Instruction::FAdd: 5409 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1), 5410 I->getFastMathFlags(), Q); 5411 break; 5412 case Instruction::Add: 5413 Result = 5414 SimplifyAddInst(I->getOperand(0), I->getOperand(1), 5415 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5416 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5417 break; 5418 case Instruction::FSub: 5419 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1), 5420 I->getFastMathFlags(), Q); 5421 break; 5422 case Instruction::Sub: 5423 Result = 5424 SimplifySubInst(I->getOperand(0), I->getOperand(1), 5425 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5426 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5427 break; 5428 case Instruction::FMul: 5429 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1), 5430 I->getFastMathFlags(), Q); 5431 break; 5432 case Instruction::Mul: 5433 Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q); 5434 break; 5435 case Instruction::SDiv: 5436 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q); 5437 break; 5438 case Instruction::UDiv: 5439 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q); 5440 break; 5441 case Instruction::FDiv: 5442 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), 5443 I->getFastMathFlags(), Q); 5444 break; 5445 case Instruction::SRem: 5446 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q); 5447 break; 5448 case Instruction::URem: 5449 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q); 5450 break; 5451 case Instruction::FRem: 5452 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), 5453 I->getFastMathFlags(), Q); 5454 break; 5455 case Instruction::Shl: 5456 Result = 5457 SimplifyShlInst(I->getOperand(0), I->getOperand(1), 5458 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5459 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5460 break; 5461 case Instruction::LShr: 5462 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1), 5463 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q); 5464 break; 5465 case Instruction::AShr: 5466 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1), 5467 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q); 5468 break; 5469 case Instruction::And: 5470 Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q); 5471 break; 5472 case Instruction::Or: 5473 Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q); 5474 break; 5475 case Instruction::Xor: 5476 Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q); 5477 break; 5478 case Instruction::ICmp: 5479 Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), 5480 I->getOperand(0), I->getOperand(1), Q); 5481 break; 5482 case Instruction::FCmp: 5483 Result = 5484 SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), 
I->getOperand(0), 5485 I->getOperand(1), I->getFastMathFlags(), Q); 5486 break; 5487 case Instruction::Select: 5488 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1), 5489 I->getOperand(2), Q); 5490 break; 5491 case Instruction::GetElementPtr: { 5492 SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end()); 5493 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(), 5494 Ops, Q); 5495 break; 5496 } 5497 case Instruction::InsertValue: { 5498 InsertValueInst *IV = cast<InsertValueInst>(I); 5499 Result = SimplifyInsertValueInst(IV->getAggregateOperand(), 5500 IV->getInsertedValueOperand(), 5501 IV->getIndices(), Q); 5502 break; 5503 } 5504 case Instruction::InsertElement: { 5505 auto *IE = cast<InsertElementInst>(I); 5506 Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1), 5507 IE->getOperand(2), Q); 5508 break; 5509 } 5510 case Instruction::ExtractValue: { 5511 auto *EVI = cast<ExtractValueInst>(I); 5512 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(), 5513 EVI->getIndices(), Q); 5514 break; 5515 } 5516 case Instruction::ExtractElement: { 5517 auto *EEI = cast<ExtractElementInst>(I); 5518 Result = SimplifyExtractElementInst(EEI->getVectorOperand(), 5519 EEI->getIndexOperand(), Q); 5520 break; 5521 } 5522 case Instruction::ShuffleVector: { 5523 auto *SVI = cast<ShuffleVectorInst>(I); 5524 Result = SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), 5525 SVI->getMask(), SVI->getType(), Q); 5526 break; 5527 } 5528 case Instruction::PHI: 5529 Result = SimplifyPHINode(cast<PHINode>(I), Q); 5530 break; 5531 case Instruction::Call: { 5532 Result = SimplifyCall(cast<CallInst>(I), Q); 5533 // Don't perform known bits simplification below for musttail calls. 5534 if (cast<CallInst>(I)->isMustTailCall()) 5535 return Result; 5536 break; 5537 } 5538 case Instruction::Freeze: 5539 Result = SimplifyFreezeInst(I->getOperand(0), Q); 5540 break; 5541 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc: 5542 #include "llvm/IR/Instruction.def" 5543 #undef HANDLE_CAST_INST 5544 Result = 5545 SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q); 5546 break; 5547 case Instruction::Alloca: 5548 // No simplifications for Alloca and it can't be constant folded. 5549 Result = nullptr; 5550 break; 5551 } 5552 5553 // In general, it is possible for computeKnownBits to determine all bits in a 5554 // value even when the operands are not all constants. 5555 if (!Result && I->getType()->isIntOrIntVectorTy()) { 5556 KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE); 5557 if (Known.isConstant()) 5558 Result = ConstantInt::get(I->getType(), Known.getConstant()); 5559 } 5560 5561 /// If called on unreachable code, the above logic may report that the 5562 /// instruction simplified to itself. Make life easier for users by 5563 /// detecting that case here, returning a safe value instead. 5564 return Result == I ? UndefValue::get(I->getType()) : Result; 5565 } 5566 5567 /// Implementation of recursive simplification through an instruction's 5568 /// uses. 5569 /// 5570 /// This is the common implementation of the recursive simplification routines. 5571 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to 5572 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of 5573 /// instructions to process and attempt to simplify it using 5574 /// InstructionSimplify. 
Recursively visited users which could not be
/// simplified themselves are added to the optional UnsimplifiedUsers set for
/// further processing by the caller.
///
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
  bool Simplified = false;
  SmallSetVector<Instruction *, 8> Worklist;
  const DataLayout &DL = I->getModule()->getDataLayout();

  // If we have an explicit value to collapse to, do that round of the
  // simplification loop by hand initially.
  if (SimpleV) {
    for (User *U : I->users())
      if (U != I)
        Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  } else {
    Worklist.insert(I);
  }

  // Note that we must test the size on each iteration, as the worklist can
  // grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    I = Worklist[Idx];

    // See if this instruction simplifies.
    SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
    if (!SimpleV) {
      if (UnsimplifiedUsers)
        UnsimplifiedUsers->insert(I);
      continue;
    }

    Simplified = true;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking
    // all uses of the new value on the recursive step in most cases.
    for (User *U : I->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  }
  return Simplified;
}

bool llvm::recursivelySimplifyInstruction(Instruction *I,
                                          const TargetLibraryInfo *TLI,
                                          const DominatorTree *DT,
                                          AssumptionCache *AC) {
  return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC, nullptr);
}

bool llvm::replaceAndRecursivelySimplify(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
  assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
  assert(SimpleV && "Must provide a simplified value.");
  return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
                                           UnsimplifiedUsers);
}

namespace llvm {
const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
  auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  auto *TLI = TLIWP ?
&TLIWP->getTLI(F) : nullptr; 5662 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>(); 5663 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr; 5664 return {F.getParent()->getDataLayout(), TLI, DT, AC}; 5665 } 5666 5667 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR, 5668 const DataLayout &DL) { 5669 return {DL, &AR.TLI, &AR.DT, &AR.AC}; 5670 } 5671 5672 template <class T, class... TArgs> 5673 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM, 5674 Function &F) { 5675 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F); 5676 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F); 5677 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F); 5678 return {F.getParent()->getDataLayout(), TLI, DT, AC}; 5679 } 5680 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &, 5681 Function &); 5682 } 5683
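
// For illustration only: a hedged sketch of how a legacy pass might drive the
// entry points above. `MyPass` and its `runOnFunction` are hypothetical and
// not part of this file; a real caller would also guard EH pads and
// terminators before erasing, as replaceAndRecursivelySimplifyImpl does.
//
//   bool MyPass::runOnFunction(Function &F) {
//     const SimplifyQuery Q = getBestSimplifyQuery(*this, F);
//     bool Changed = false;
//     for (BasicBlock &BB : F)
//       for (Instruction &I : llvm::make_early_inc_range(BB))
//         if (Value *V = SimplifyInstruction(&I, Q)) {
//           I.replaceAllUsesWith(V);
//           if (!I.mayHaveSideEffects())
//             I.eraseFromParent();
//           Changed = true;
//         }
//     return Changed;
//   }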