1 //===- InstructionSimplify.cpp - Fold instruction operands ----------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements routines for folding instructions into simpler forms 10 // that do not require creating new instructions. This does constant folding 11 // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either 12 // returning a constant ("and i32 %x, 0" -> "0") or an already existing value 13 // ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been 14 // simplified: This is usually true and assuming it simplifies the logic (if 15 // they have not been simplified then results are correct but maybe suboptimal). 16 // 17 //===----------------------------------------------------------------------===// 18 19 #include "llvm/Analysis/InstructionSimplify.h" 20 #include "llvm/ADT/SetVector.h" 21 #include "llvm/ADT/Statistic.h" 22 #include "llvm/Analysis/AliasAnalysis.h" 23 #include "llvm/Analysis/AssumptionCache.h" 24 #include "llvm/Analysis/CaptureTracking.h" 25 #include "llvm/Analysis/CmpInstAnalysis.h" 26 #include "llvm/Analysis/ConstantFolding.h" 27 #include "llvm/Analysis/LoopAnalysisManager.h" 28 #include "llvm/Analysis/MemoryBuiltins.h" 29 #include "llvm/Analysis/ValueTracking.h" 30 #include "llvm/Analysis/VectorUtils.h" 31 #include "llvm/IR/ConstantRange.h" 32 #include "llvm/IR/DataLayout.h" 33 #include "llvm/IR/Dominators.h" 34 #include "llvm/IR/GetElementPtrTypeIterator.h" 35 #include "llvm/IR/GlobalAlias.h" 36 #include "llvm/IR/InstrTypes.h" 37 #include "llvm/IR/Instructions.h" 38 #include "llvm/IR/Operator.h" 39 #include "llvm/IR/PatternMatch.h" 40 #include "llvm/IR/ValueHandle.h" 41 #include "llvm/Support/KnownBits.h" 42 #include <algorithm> 43 using namespace llvm; 44 using namespace llvm::PatternMatch; 45 46 #define DEBUG_TYPE "instsimplify" 47 48 enum { RecursionLimit = 3 }; 49 50 STATISTIC(NumExpand, "Number of expansions"); 51 STATISTIC(NumReassoc, "Number of reassociations"); 52 53 static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned); 54 static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned); 55 static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, 56 const SimplifyQuery &, unsigned); 57 static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &, 58 unsigned); 59 static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &, 60 const SimplifyQuery &, unsigned); 61 static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &, 62 unsigned); 63 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 64 const SimplifyQuery &Q, unsigned MaxRecurse); 65 static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned); 66 static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned); 67 static Value *SimplifyCastInst(unsigned, Value *, Type *, 68 const SimplifyQuery &, unsigned); 69 static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &, 70 unsigned); 71 72 static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal, 73 Value *FalseVal) { 74 BinaryOperator::BinaryOps BinOpCode; 75 if (auto *BO = dyn_cast<BinaryOperator>(Cond)) 76 BinOpCode = 
BO->getOpcode(); 77 else 78 return nullptr; 79 80 CmpInst::Predicate ExpectedPred, Pred1, Pred2; 81 if (BinOpCode == BinaryOperator::Or) { 82 ExpectedPred = ICmpInst::ICMP_NE; 83 } else if (BinOpCode == BinaryOperator::And) { 84 ExpectedPred = ICmpInst::ICMP_EQ; 85 } else 86 return nullptr; 87 88 // %A = icmp eq %TV, %FV 89 // %B = icmp eq %X, %Y (and one of these is a select operand) 90 // %C = and %A, %B 91 // %D = select %C, %TV, %FV 92 // --> 93 // %FV 94 95 // %A = icmp ne %TV, %FV 96 // %B = icmp ne %X, %Y (and one of these is a select operand) 97 // %C = or %A, %B 98 // %D = select %C, %TV, %FV 99 // --> 100 // %TV 101 Value *X, *Y; 102 if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal), 103 m_Specific(FalseVal)), 104 m_ICmp(Pred2, m_Value(X), m_Value(Y)))) || 105 Pred1 != Pred2 || Pred1 != ExpectedPred) 106 return nullptr; 107 108 if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal) 109 return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal; 110 111 return nullptr; 112 } 113 114 /// For a boolean type or a vector of boolean type, return false or a vector 115 /// with every element false. 116 static Constant *getFalse(Type *Ty) { 117 return ConstantInt::getFalse(Ty); 118 } 119 120 /// For a boolean type or a vector of boolean type, return true or a vector 121 /// with every element true. 122 static Constant *getTrue(Type *Ty) { 123 return ConstantInt::getTrue(Ty); 124 } 125 126 /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"? 127 static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS, 128 Value *RHS) { 129 CmpInst *Cmp = dyn_cast<CmpInst>(V); 130 if (!Cmp) 131 return false; 132 CmpInst::Predicate CPred = Cmp->getPredicate(); 133 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1); 134 if (CPred == Pred && CLHS == LHS && CRHS == RHS) 135 return true; 136 return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS && 137 CRHS == LHS; 138 } 139 140 /// Simplify comparison with true or false branch of select: 141 /// %sel = select i1 %cond, i32 %tv, i32 %fv 142 /// %cmp = icmp sle i32 %sel, %rhs 143 /// Compose new comparison by substituting %sel with either %tv or %fv 144 /// and see if it simplifies. 145 static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS, 146 Value *RHS, Value *Cond, 147 const SimplifyQuery &Q, unsigned MaxRecurse, 148 Constant *TrueOrFalse) { 149 Value *SimplifiedCmp = SimplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse); 150 if (SimplifiedCmp == Cond) { 151 // %cmp simplified to the select condition (%cond). 152 return TrueOrFalse; 153 } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) { 154 // It didn't simplify. However, if composed comparison is equivalent 155 // to the select condition (%cond) then we can replace it. 
156 return TrueOrFalse; 157 } 158 return SimplifiedCmp; 159 } 160 161 /// Simplify comparison with true branch of select 162 static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS, 163 Value *RHS, Value *Cond, 164 const SimplifyQuery &Q, 165 unsigned MaxRecurse) { 166 return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse, 167 getTrue(Cond->getType())); 168 } 169 170 /// Simplify comparison with false branch of select 171 static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS, 172 Value *RHS, Value *Cond, 173 const SimplifyQuery &Q, 174 unsigned MaxRecurse) { 175 return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse, 176 getFalse(Cond->getType())); 177 } 178 179 /// We know comparison with both branches of select can be simplified, but they 180 /// are not equal. This routine handles some logical simplifications. 181 static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, 182 Value *Cond, 183 const SimplifyQuery &Q, 184 unsigned MaxRecurse) { 185 // If the false value simplified to false, then the result of the compare 186 // is equal to "Cond && TCmp". This also catches the case when the false 187 // value simplified to false and the true value to true, returning "Cond". 188 if (match(FCmp, m_Zero())) 189 if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse)) 190 return V; 191 // If the true value simplified to true, then the result of the compare 192 // is equal to "Cond || FCmp". 193 if (match(TCmp, m_One())) 194 if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse)) 195 return V; 196 // Finally, if the false value simplified to true and the true value to 197 // false, then the result of the compare is equal to "!Cond". 198 if (match(FCmp, m_One()) && match(TCmp, m_Zero())) 199 if (Value *V = SimplifyXorInst( 200 Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse)) 201 return V; 202 return nullptr; 203 } 204 205 /// Does the given value dominate the specified phi node? 206 static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) { 207 Instruction *I = dyn_cast<Instruction>(V); 208 if (!I) 209 // Arguments and constants dominate all instructions. 210 return true; 211 212 // If we are processing instructions (and/or basic blocks) that have not been 213 // fully added to a function, the parent nodes may still be null. Simply 214 // return the conservative answer in these cases. 215 if (!I->getParent() || !P->getParent() || !I->getFunction()) 216 return false; 217 218 // If we have a DominatorTree then do a precise test. 219 if (DT) 220 return DT->dominates(I, P); 221 222 // Otherwise, if the instruction is in the entry block and is not an invoke, 223 // then it obviously dominates all phi nodes. 224 if (I->getParent() == &I->getFunction()->getEntryBlock() && 225 !isa<InvokeInst>(I)) 226 return true; 227 228 return false; 229 } 230 231 /// Simplify "A op (B op' C)" by distributing op over op', turning it into 232 /// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is 233 /// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS. 234 /// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)". 235 /// Returns the simplified value, or null if no simplification was performed. 236 static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, 237 Instruction::BinaryOps OpcodeToExpand, 238 const SimplifyQuery &Q, unsigned MaxRecurse) { 239 // Recursion is always used, so bail out at once if we already hit the limit. 
240 if (!MaxRecurse--) 241 return nullptr; 242 243 // Check whether the expression has the form "(A op' B) op C". 244 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS)) 245 if (Op0->getOpcode() == OpcodeToExpand) { 246 // It does! Try turning it into "(A op C) op' (B op C)". 247 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS; 248 // Do "A op C" and "B op C" both simplify? 249 if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) 250 if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) { 251 // They do! Return "L op' R" if it simplifies or is already available. 252 // If "L op' R" equals "A op' B" then "L op' R" is just the LHS. 253 if ((L == A && R == B) || (Instruction::isCommutative(OpcodeToExpand) 254 && L == B && R == A)) { 255 ++NumExpand; 256 return LHS; 257 } 258 // Otherwise return "L op' R" if it simplifies. 259 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) { 260 ++NumExpand; 261 return V; 262 } 263 } 264 } 265 266 // Check whether the expression has the form "A op (B op' C)". 267 if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS)) 268 if (Op1->getOpcode() == OpcodeToExpand) { 269 // It does! Try turning it into "(A op B) op' (A op C)". 270 Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1); 271 // Do "A op B" and "A op C" both simplify? 272 if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) 273 if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) { 274 // They do! Return "L op' R" if it simplifies or is already available. 275 // If "L op' R" equals "B op' C" then "L op' R" is just the RHS. 276 if ((L == B && R == C) || (Instruction::isCommutative(OpcodeToExpand) 277 && L == C && R == B)) { 278 ++NumExpand; 279 return RHS; 280 } 281 // Otherwise return "L op' R" if it simplifies. 282 if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) { 283 ++NumExpand; 284 return V; 285 } 286 } 287 } 288 289 return nullptr; 290 } 291 292 /// Generic simplifications for associative binary operations. 293 /// Returns the simpler value, or null if none was found. 294 static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode, 295 Value *LHS, Value *RHS, 296 const SimplifyQuery &Q, 297 unsigned MaxRecurse) { 298 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!"); 299 300 // Recursion is always used, so bail out at once if we already hit the limit. 301 if (!MaxRecurse--) 302 return nullptr; 303 304 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); 305 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS); 306 307 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely. 308 if (Op0 && Op0->getOpcode() == Opcode) { 309 Value *A = Op0->getOperand(0); 310 Value *B = Op0->getOperand(1); 311 Value *C = RHS; 312 313 // Does "B op C" simplify? 314 if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) { 315 // It does! Return "A op V" if it simplifies or is already available. 316 // If V equals B then "A op V" is just the LHS. 317 if (V == B) return LHS; 318 // Otherwise return "A op V" if it simplifies. 319 if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) { 320 ++NumReassoc; 321 return W; 322 } 323 } 324 } 325 326 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely. 327 if (Op1 && Op1->getOpcode() == Opcode) { 328 Value *A = LHS; 329 Value *B = Op1->getOperand(0); 330 Value *C = Op1->getOperand(1); 331 332 // Does "A op B" simplify? 
333 if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) { 334 // It does! Return "V op C" if it simplifies or is already available. 335 // If V equals B then "V op C" is just the RHS. 336 if (V == B) return RHS; 337 // Otherwise return "V op C" if it simplifies. 338 if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) { 339 ++NumReassoc; 340 return W; 341 } 342 } 343 } 344 345 // The remaining transforms require commutativity as well as associativity. 346 if (!Instruction::isCommutative(Opcode)) 347 return nullptr; 348 349 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely. 350 if (Op0 && Op0->getOpcode() == Opcode) { 351 Value *A = Op0->getOperand(0); 352 Value *B = Op0->getOperand(1); 353 Value *C = RHS; 354 355 // Does "C op A" simplify? 356 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { 357 // It does! Return "V op B" if it simplifies or is already available. 358 // If V equals A then "V op B" is just the LHS. 359 if (V == A) return LHS; 360 // Otherwise return "V op B" if it simplifies. 361 if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) { 362 ++NumReassoc; 363 return W; 364 } 365 } 366 } 367 368 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely. 369 if (Op1 && Op1->getOpcode() == Opcode) { 370 Value *A = LHS; 371 Value *B = Op1->getOperand(0); 372 Value *C = Op1->getOperand(1); 373 374 // Does "C op A" simplify? 375 if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) { 376 // It does! Return "B op V" if it simplifies or is already available. 377 // If V equals C then "B op V" is just the RHS. 378 if (V == C) return RHS; 379 // Otherwise return "B op V" if it simplifies. 380 if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) { 381 ++NumReassoc; 382 return W; 383 } 384 } 385 } 386 387 return nullptr; 388 } 389 390 /// In the case of a binary operation with a select instruction as an operand, 391 /// try to simplify the binop by seeing whether evaluating it on both branches 392 /// of the select results in the same value. Returns the common value if so, 393 /// otherwise returns null. 394 static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, 395 Value *RHS, const SimplifyQuery &Q, 396 unsigned MaxRecurse) { 397 // Recursion is always used, so bail out at once if we already hit the limit. 398 if (!MaxRecurse--) 399 return nullptr; 400 401 SelectInst *SI; 402 if (isa<SelectInst>(LHS)) { 403 SI = cast<SelectInst>(LHS); 404 } else { 405 assert(isa<SelectInst>(RHS) && "No select instruction operand!"); 406 SI = cast<SelectInst>(RHS); 407 } 408 409 // Evaluate the BinOp on the true and false branches of the select. 410 Value *TV; 411 Value *FV; 412 if (SI == LHS) { 413 TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse); 414 FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse); 415 } else { 416 TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse); 417 FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse); 418 } 419 420 // If they simplified to the same value, then return the common value. 421 // If they both failed to simplify then return null. 422 if (TV == FV) 423 return TV; 424 425 // If one branch simplified to undef, return the other one. 426 if (TV && isa<UndefValue>(TV)) 427 return FV; 428 if (FV && isa<UndefValue>(FV)) 429 return TV; 430 431 // If applying the operation did not change the true and false select values, 432 // then the result of the binop is the select itself. 
433 if (TV == SI->getTrueValue() && FV == SI->getFalseValue()) 434 return SI; 435 436 // If one branch simplified and the other did not, and the simplified 437 // value is equal to the unsimplified one, return the simplified value. 438 // For example, select (cond, X, X & Z) & Z -> X & Z. 439 if ((FV && !TV) || (TV && !FV)) { 440 // Check that the simplified value has the form "X op Y" where "op" is the 441 // same as the original operation. 442 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV); 443 if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) { 444 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS". 445 // We already know that "op" is the same as for the simplified value. See 446 // if the operands match too. If so, return the simplified value. 447 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue(); 448 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS; 449 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch; 450 if (Simplified->getOperand(0) == UnsimplifiedLHS && 451 Simplified->getOperand(1) == UnsimplifiedRHS) 452 return Simplified; 453 if (Simplified->isCommutative() && 454 Simplified->getOperand(1) == UnsimplifiedLHS && 455 Simplified->getOperand(0) == UnsimplifiedRHS) 456 return Simplified; 457 } 458 } 459 460 return nullptr; 461 } 462 463 /// In the case of a comparison with a select instruction, try to simplify the 464 /// comparison by seeing whether both branches of the select result in the same 465 /// value. Returns the common value if so, otherwise returns null. 466 /// For example, if we have: 467 /// %tmp = select i1 %cmp, i32 1, i32 2 468 /// %cmp1 = icmp sle i32 %tmp, 3 469 /// We can simplify %cmp1 to true, because both branches of select are 470 /// less than 3. We compose new comparison by substituting %tmp with both 471 /// branches of select and see if it can be simplified. 472 static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS, 473 Value *RHS, const SimplifyQuery &Q, 474 unsigned MaxRecurse) { 475 // Recursion is always used, so bail out at once if we already hit the limit. 476 if (!MaxRecurse--) 477 return nullptr; 478 479 // Make sure the select is on the LHS. 480 if (!isa<SelectInst>(LHS)) { 481 std::swap(LHS, RHS); 482 Pred = CmpInst::getSwappedPredicate(Pred); 483 } 484 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!"); 485 SelectInst *SI = cast<SelectInst>(LHS); 486 Value *Cond = SI->getCondition(); 487 Value *TV = SI->getTrueValue(); 488 Value *FV = SI->getFalseValue(); 489 490 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it. 491 // Does "cmp TV, RHS" simplify? 492 Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse); 493 if (!TCmp) 494 return nullptr; 495 496 // Does "cmp FV, RHS" simplify? 497 Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse); 498 if (!FCmp) 499 return nullptr; 500 501 // If both sides simplified to the same value, then use it as the result of 502 // the original comparison. 503 if (TCmp == FCmp) 504 return TCmp; 505 506 // The remaining cases only make sense if the select condition has the same 507 // type as the result of the comparison, so bail out if this is not so. 
508 if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy()) 509 return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse); 510 511 return nullptr; 512 } 513 514 /// In the case of a binary operation with an operand that is a PHI instruction, 515 /// try to simplify the binop by seeing whether evaluating it on the incoming 516 /// phi values yields the same result for every value. If so returns the common 517 /// value, otherwise returns null. 518 static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, 519 Value *RHS, const SimplifyQuery &Q, 520 unsigned MaxRecurse) { 521 // Recursion is always used, so bail out at once if we already hit the limit. 522 if (!MaxRecurse--) 523 return nullptr; 524 525 PHINode *PI; 526 if (isa<PHINode>(LHS)) { 527 PI = cast<PHINode>(LHS); 528 // Bail out if RHS and the phi may be mutually interdependent due to a loop. 529 if (!valueDominatesPHI(RHS, PI, Q.DT)) 530 return nullptr; 531 } else { 532 assert(isa<PHINode>(RHS) && "No PHI instruction operand!"); 533 PI = cast<PHINode>(RHS); 534 // Bail out if LHS and the phi may be mutually interdependent due to a loop. 535 if (!valueDominatesPHI(LHS, PI, Q.DT)) 536 return nullptr; 537 } 538 539 // Evaluate the BinOp on the incoming phi values. 540 Value *CommonValue = nullptr; 541 for (Value *Incoming : PI->incoming_values()) { 542 // If the incoming value is the phi node itself, it can safely be skipped. 543 if (Incoming == PI) continue; 544 Value *V = PI == LHS ? 545 SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) : 546 SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse); 547 // If the operation failed to simplify, or simplified to a different value 548 // to previously, then give up. 549 if (!V || (CommonValue && V != CommonValue)) 550 return nullptr; 551 CommonValue = V; 552 } 553 554 return CommonValue; 555 } 556 557 /// In the case of a comparison with a PHI instruction, try to simplify the 558 /// comparison by seeing whether comparing with all of the incoming phi values 559 /// yields the same result every time. If so returns the common result, 560 /// otherwise returns null. 561 static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS, 562 const SimplifyQuery &Q, unsigned MaxRecurse) { 563 // Recursion is always used, so bail out at once if we already hit the limit. 564 if (!MaxRecurse--) 565 return nullptr; 566 567 // Make sure the phi is on the LHS. 568 if (!isa<PHINode>(LHS)) { 569 std::swap(LHS, RHS); 570 Pred = CmpInst::getSwappedPredicate(Pred); 571 } 572 assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!"); 573 PHINode *PI = cast<PHINode>(LHS); 574 575 // Bail out if RHS and the phi may be mutually interdependent due to a loop. 576 if (!valueDominatesPHI(RHS, PI, Q.DT)) 577 return nullptr; 578 579 // Evaluate the BinOp on the incoming phi values. 580 Value *CommonValue = nullptr; 581 for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) { 582 Value *Incoming = PI->getIncomingValue(u); 583 Instruction *InTI = PI->getIncomingBlock(u)->getTerminator(); 584 // If the incoming value is the phi node itself, it can safely be skipped. 585 if (Incoming == PI) continue; 586 // Change the context instruction to the "edge" that flows into the phi. 587 // This is important because that is where incoming is actually "evaluated" 588 // even though it is used later somewhere else. 
589 Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI), 590 MaxRecurse); 591 // If the operation failed to simplify, or simplified to a different value 592 // to previously, then give up. 593 if (!V || (CommonValue && V != CommonValue)) 594 return nullptr; 595 CommonValue = V; 596 } 597 598 return CommonValue; 599 } 600 601 static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode, 602 Value *&Op0, Value *&Op1, 603 const SimplifyQuery &Q) { 604 if (auto *CLHS = dyn_cast<Constant>(Op0)) { 605 if (auto *CRHS = dyn_cast<Constant>(Op1)) 606 return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL); 607 608 // Canonicalize the constant to the RHS if this is a commutative operation. 609 if (Instruction::isCommutative(Opcode)) 610 std::swap(Op0, Op1); 611 } 612 return nullptr; 613 } 614 615 /// Given operands for an Add, see if we can fold the result. 616 /// If not, this returns null. 617 static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, 618 const SimplifyQuery &Q, unsigned MaxRecurse) { 619 if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q)) 620 return C; 621 622 // X + undef -> undef 623 if (match(Op1, m_Undef())) 624 return Op1; 625 626 // X + 0 -> X 627 if (match(Op1, m_Zero())) 628 return Op0; 629 630 // If two operands are negative, return 0. 631 if (isKnownNegation(Op0, Op1)) 632 return Constant::getNullValue(Op0->getType()); 633 634 // X + (Y - X) -> Y 635 // (Y - X) + X -> Y 636 // Eg: X + -X -> 0 637 Value *Y = nullptr; 638 if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) || 639 match(Op0, m_Sub(m_Value(Y), m_Specific(Op1)))) 640 return Y; 641 642 // X + ~X -> -1 since ~X = -X-1 643 Type *Ty = Op0->getType(); 644 if (match(Op0, m_Not(m_Specific(Op1))) || 645 match(Op1, m_Not(m_Specific(Op0)))) 646 return Constant::getAllOnesValue(Ty); 647 648 // add nsw/nuw (xor Y, signmask), signmask --> Y 649 // The no-wrapping add guarantees that the top bit will be set by the add. 650 // Therefore, the xor must be clearing the already set sign bit of Y. 651 if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) && 652 match(Op0, m_Xor(m_Value(Y), m_SignMask()))) 653 return Y; 654 655 // add nuw %x, -1 -> -1, because %x can only be 0. 656 if (IsNUW && match(Op1, m_AllOnes())) 657 return Op1; // Which is -1. 658 659 /// i1 add -> xor. 660 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1)) 661 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1)) 662 return V; 663 664 // Try some generic simplifications for associative operations. 665 if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, 666 MaxRecurse)) 667 return V; 668 669 // Threading Add over selects and phi nodes is pointless, so don't bother. 670 // Threading over the select in "A + select(cond, B, C)" means evaluating 671 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and 672 // only if B and C are equal. If B and C are equal then (since we assume 673 // that operands have already been simplified) "select(cond, B, C)" should 674 // have been simplified to the common value of B and C already. Analysing 675 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly 676 // for threading over phi nodes. 
677 678 return nullptr; 679 } 680 681 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, 682 const SimplifyQuery &Query) { 683 return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit); 684 } 685 686 /// Compute the base pointer and cumulative constant offsets for V. 687 /// 688 /// This strips all constant offsets off of V, leaving it the base pointer, and 689 /// accumulates the total constant offset applied in the returned constant. It 690 /// returns 0 if V is not a pointer, and returns the constant '0' if there are 691 /// no constant offsets applied. 692 /// 693 /// This is very similar to GetPointerBaseWithConstantOffset except it doesn't 694 /// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc. 695 /// folding. 696 static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V, 697 bool AllowNonInbounds = false) { 698 assert(V->getType()->isPtrOrPtrVectorTy()); 699 700 Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType(); 701 APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth()); 702 703 V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds); 704 // As that strip may trace through `addrspacecast`, need to sext or trunc 705 // the offset calculated. 706 IntIdxTy = DL.getIndexType(V->getType())->getScalarType(); 707 Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth()); 708 709 Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset); 710 if (V->getType()->isVectorTy()) 711 return ConstantVector::getSplat(V->getType()->getVectorNumElements(), 712 OffsetIntPtr); 713 return OffsetIntPtr; 714 } 715 716 /// Compute the constant difference between two pointer values. 717 /// If the difference is not a constant, returns zero. 718 static Constant *computePointerDifference(const DataLayout &DL, Value *LHS, 719 Value *RHS) { 720 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS); 721 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS); 722 723 // If LHS and RHS are not related via constant offsets to the same base 724 // value, there is nothing we can do here. 725 if (LHS != RHS) 726 return nullptr; 727 728 // Otherwise, the difference of LHS - RHS can be computed as: 729 // LHS - RHS 730 // = (LHSOffset + Base) - (RHSOffset + Base) 731 // = LHSOffset - RHSOffset 732 return ConstantExpr::getSub(LHSOffset, RHSOffset); 733 } 734 735 /// Given operands for a Sub, see if we can fold the result. 736 /// If not, this returns null. 737 static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 738 const SimplifyQuery &Q, unsigned MaxRecurse) { 739 if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q)) 740 return C; 741 742 // X - undef -> undef 743 // undef - X -> undef 744 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 745 return UndefValue::get(Op0->getType()); 746 747 // X - 0 -> X 748 if (match(Op1, m_Zero())) 749 return Op0; 750 751 // X - X -> 0 752 if (Op0 == Op1) 753 return Constant::getNullValue(Op0->getType()); 754 755 // Is this a negation? 756 if (match(Op0, m_Zero())) { 757 // 0 - X -> 0 if the sub is NUW. 758 if (isNUW) 759 return Constant::getNullValue(Op0->getType()); 760 761 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 762 if (Known.Zero.isMaxSignedValue()) { 763 // Op1 is either 0 or the minimum signed value. If the sub is NSW, then 764 // Op1 must be 0 because negating the minimum signed value is undefined. 
765 if (isNSW) 766 return Constant::getNullValue(Op0->getType()); 767 768 // 0 - X -> X if X is 0 or the minimum signed value. 769 return Op1; 770 } 771 } 772 773 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies. 774 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X 775 Value *X = nullptr, *Y = nullptr, *Z = Op1; 776 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z 777 // See if "V === Y - Z" simplifies. 778 if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1)) 779 // It does! Now see if "X + V" simplifies. 780 if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) { 781 // It does, we successfully reassociated! 782 ++NumReassoc; 783 return W; 784 } 785 // See if "V === X - Z" simplifies. 786 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1)) 787 // It does! Now see if "Y + V" simplifies. 788 if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) { 789 // It does, we successfully reassociated! 790 ++NumReassoc; 791 return W; 792 } 793 } 794 795 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies. 796 // For example, X - (X + 1) -> -1 797 X = Op0; 798 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z) 799 // See if "V === X - Y" simplifies. 800 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1)) 801 // It does! Now see if "V - Z" simplifies. 802 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) { 803 // It does, we successfully reassociated! 804 ++NumReassoc; 805 return W; 806 } 807 // See if "V === X - Z" simplifies. 808 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1)) 809 // It does! Now see if "V - Y" simplifies. 810 if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) { 811 // It does, we successfully reassociated! 812 ++NumReassoc; 813 return W; 814 } 815 } 816 817 // Z - (X - Y) -> (Z - X) + Y if everything simplifies. 818 // For example, X - (X - Y) -> Y. 819 Z = Op0; 820 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y) 821 // See if "V === Z - X" simplifies. 822 if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1)) 823 // It does! Now see if "V + Y" simplifies. 824 if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) { 825 // It does, we successfully reassociated! 826 ++NumReassoc; 827 return W; 828 } 829 830 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies. 831 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) && 832 match(Op1, m_Trunc(m_Value(Y)))) 833 if (X->getType() == Y->getType()) 834 // See if "V === X - Y" simplifies. 835 if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1)) 836 // It does! Now see if "trunc V" simplifies. 837 if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(), 838 Q, MaxRecurse - 1)) 839 // It does, return the simplified "trunc V". 840 return W; 841 842 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...). 843 if (match(Op0, m_PtrToInt(m_Value(X))) && 844 match(Op1, m_PtrToInt(m_Value(Y)))) 845 if (Constant *Result = computePointerDifference(Q.DL, X, Y)) 846 return ConstantExpr::getIntegerCast(Result, Op0->getType(), true); 847 848 // i1 sub -> xor. 849 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1)) 850 if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1)) 851 return V; 852 853 // Threading Sub over selects and phi nodes is pointless, so don't bother. 
854 // Threading over the select in "A - select(cond, B, C)" means evaluating 855 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and 856 // only if B and C are equal. If B and C are equal then (since we assume 857 // that operands have already been simplified) "select(cond, B, C)" should 858 // have been simplified to the common value of B and C already. Analysing 859 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly 860 // for threading over phi nodes. 861 862 return nullptr; 863 } 864 865 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 866 const SimplifyQuery &Q) { 867 return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit); 868 } 869 870 /// Given operands for a Mul, see if we can fold the result. 871 /// If not, this returns null. 872 static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 873 unsigned MaxRecurse) { 874 if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q)) 875 return C; 876 877 // X * undef -> 0 878 // X * 0 -> 0 879 if (match(Op1, m_CombineOr(m_Undef(), m_Zero()))) 880 return Constant::getNullValue(Op0->getType()); 881 882 // X * 1 -> X 883 if (match(Op1, m_One())) 884 return Op0; 885 886 // (X / Y) * Y -> X if the division is exact. 887 Value *X = nullptr; 888 if (Q.IIQ.UseInstrInfo && 889 (match(Op0, 890 m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y 891 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y) 892 return X; 893 894 // i1 mul -> and. 895 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1)) 896 if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1)) 897 return V; 898 899 // Try some generic simplifications for associative operations. 900 if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, 901 MaxRecurse)) 902 return V; 903 904 // Mul distributes over Add. Try some generic simplifications based on this. 905 if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add, 906 Q, MaxRecurse)) 907 return V; 908 909 // If the operation is with the result of a select instruction, check whether 910 // operating on either branch of the select always yields the same value. 911 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 912 if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, 913 MaxRecurse)) 914 return V; 915 916 // If the operation is with the result of a phi instruction, check whether 917 // operating on all incoming values of the phi always yields the same value. 918 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 919 if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, 920 MaxRecurse)) 921 return V; 922 923 return nullptr; 924 } 925 926 Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 927 return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit); 928 } 929 930 /// Check for common or similar folds of integer division or integer remainder. 931 /// This applies to all 4 opcodes (sdiv/udiv/srem/urem). 932 static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) { 933 Type *Ty = Op0->getType(); 934 935 // X / undef -> undef 936 // X % undef -> undef 937 if (match(Op1, m_Undef())) 938 return Op1; 939 940 // X / 0 -> undef 941 // X % 0 -> undef 942 // We don't need to preserve faults! 943 if (match(Op1, m_Zero())) 944 return UndefValue::get(Ty); 945 946 // If any element of a constant divisor vector is zero or undef, the whole op 947 // is undef. 
948 auto *Op1C = dyn_cast<Constant>(Op1); 949 if (Op1C && Ty->isVectorTy()) { 950 unsigned NumElts = Ty->getVectorNumElements(); 951 for (unsigned i = 0; i != NumElts; ++i) { 952 Constant *Elt = Op1C->getAggregateElement(i); 953 if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt))) 954 return UndefValue::get(Ty); 955 } 956 } 957 958 // undef / X -> 0 959 // undef % X -> 0 960 if (match(Op0, m_Undef())) 961 return Constant::getNullValue(Ty); 962 963 // 0 / X -> 0 964 // 0 % X -> 0 965 if (match(Op0, m_Zero())) 966 return Constant::getNullValue(Op0->getType()); 967 968 // X / X -> 1 969 // X % X -> 0 970 if (Op0 == Op1) 971 return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty); 972 973 // X / 1 -> X 974 // X % 1 -> 0 975 // If this is a boolean op (single-bit element type), we can't have 976 // division-by-zero or remainder-by-zero, so assume the divisor is 1. 977 // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1. 978 Value *X; 979 if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) || 980 (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))) 981 return IsDiv ? Op0 : Constant::getNullValue(Ty); 982 983 return nullptr; 984 } 985 986 /// Given a predicate and two operands, return true if the comparison is true. 987 /// This is a helper for div/rem simplification where we return some other value 988 /// when we can prove a relationship between the operands. 989 static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS, 990 const SimplifyQuery &Q, unsigned MaxRecurse) { 991 Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse); 992 Constant *C = dyn_cast_or_null<Constant>(V); 993 return (C && C->isAllOnesValue()); 994 } 995 996 /// Return true if we can simplify X / Y to 0. Remainder can adapt that answer 997 /// to simplify X % Y to X. 998 static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, 999 unsigned MaxRecurse, bool IsSigned) { 1000 // Recursion is always used, so bail out at once if we already hit the limit. 1001 if (!MaxRecurse--) 1002 return false; 1003 1004 if (IsSigned) { 1005 // |X| / |Y| --> 0 1006 // 1007 // We require that 1 operand is a simple constant. That could be extended to 1008 // 2 variables if we computed the sign bit for each. 1009 // 1010 // Make sure that a constant is not the minimum signed value because taking 1011 // the abs() of that is undefined. 1012 Type *Ty = X->getType(); 1013 const APInt *C; 1014 if (match(X, m_APInt(C)) && !C->isMinSignedValue()) { 1015 // Is the variable divisor magnitude always greater than the constant 1016 // dividend magnitude? 1017 // |Y| > |C| --> Y < -abs(C) or Y > abs(C) 1018 Constant *PosDividendC = ConstantInt::get(Ty, C->abs()); 1019 Constant *NegDividendC = ConstantInt::get(Ty, -C->abs()); 1020 if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) || 1021 isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse)) 1022 return true; 1023 } 1024 if (match(Y, m_APInt(C))) { 1025 // Special-case: we can't take the abs() of a minimum signed value. If 1026 // that's the divisor, then all we have to do is prove that the dividend 1027 // is also not the minimum signed value. 1028 if (C->isMinSignedValue()) 1029 return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse); 1030 1031 // Is the variable dividend magnitude always less than the constant 1032 // divisor magnitude? 
1033 // |X| < |C| --> X > -abs(C) and X < abs(C) 1034 Constant *PosDivisorC = ConstantInt::get(Ty, C->abs()); 1035 Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs()); 1036 if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) && 1037 isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse)) 1038 return true; 1039 } 1040 return false; 1041 } 1042 1043 // IsSigned == false. 1044 // Is the dividend unsigned less than the divisor? 1045 return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse); 1046 } 1047 1048 /// These are simplifications common to SDiv and UDiv. 1049 static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, 1050 const SimplifyQuery &Q, unsigned MaxRecurse) { 1051 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) 1052 return C; 1053 1054 if (Value *V = simplifyDivRem(Op0, Op1, true)) 1055 return V; 1056 1057 bool IsSigned = Opcode == Instruction::SDiv; 1058 1059 // (X * Y) / Y -> X if the multiplication does not overflow. 1060 Value *X; 1061 if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) { 1062 auto *Mul = cast<OverflowingBinaryOperator>(Op0); 1063 // If the Mul does not overflow, then we are good to go. 1064 if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) || 1065 (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul))) 1066 return X; 1067 // If X has the form X = A / Y, then X * Y cannot overflow. 1068 if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) || 1069 (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) 1070 return X; 1071 } 1072 1073 // (X rem Y) / Y -> 0 1074 if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) || 1075 (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1))))) 1076 return Constant::getNullValue(Op0->getType()); 1077 1078 // (X /u C1) /u C2 -> 0 if C1 * C2 overflow 1079 ConstantInt *C1, *C2; 1080 if (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) && 1081 match(Op1, m_ConstantInt(C2))) { 1082 bool Overflow; 1083 (void)C1->getValue().umul_ov(C2->getValue(), Overflow); 1084 if (Overflow) 1085 return Constant::getNullValue(Op0->getType()); 1086 } 1087 1088 // If the operation is with the result of a select instruction, check whether 1089 // operating on either branch of the select always yields the same value. 1090 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1091 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) 1092 return V; 1093 1094 // If the operation is with the result of a phi instruction, check whether 1095 // operating on all incoming values of the phi always yields the same value. 1096 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1097 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) 1098 return V; 1099 1100 if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned)) 1101 return Constant::getNullValue(Op0->getType()); 1102 1103 return nullptr; 1104 } 1105 1106 /// These are simplifications common to SRem and URem. 
1107 static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, 1108 const SimplifyQuery &Q, unsigned MaxRecurse) { 1109 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) 1110 return C; 1111 1112 if (Value *V = simplifyDivRem(Op0, Op1, false)) 1113 return V; 1114 1115 // (X % Y) % Y -> X % Y 1116 if ((Opcode == Instruction::SRem && 1117 match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) || 1118 (Opcode == Instruction::URem && 1119 match(Op0, m_URem(m_Value(), m_Specific(Op1))))) 1120 return Op0; 1121 1122 // (X << Y) % X -> 0 1123 if (Q.IIQ.UseInstrInfo && 1124 ((Opcode == Instruction::SRem && 1125 match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) || 1126 (Opcode == Instruction::URem && 1127 match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))) 1128 return Constant::getNullValue(Op0->getType()); 1129 1130 // If the operation is with the result of a select instruction, check whether 1131 // operating on either branch of the select always yields the same value. 1132 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1133 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) 1134 return V; 1135 1136 // If the operation is with the result of a phi instruction, check whether 1137 // operating on all incoming values of the phi always yields the same value. 1138 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1139 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) 1140 return V; 1141 1142 // If X / Y == 0, then X % Y == X. 1143 if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem)) 1144 return Op0; 1145 1146 return nullptr; 1147 } 1148 1149 /// Given operands for an SDiv, see if we can fold the result. 1150 /// If not, this returns null. 1151 static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1152 unsigned MaxRecurse) { 1153 // If two operands are negated and no signed overflow, return -1. 1154 if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true)) 1155 return Constant::getAllOnesValue(Op0->getType()); 1156 1157 return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse); 1158 } 1159 1160 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1161 return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit); 1162 } 1163 1164 /// Given operands for a UDiv, see if we can fold the result. 1165 /// If not, this returns null. 1166 static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1167 unsigned MaxRecurse) { 1168 return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse); 1169 } 1170 1171 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1172 return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit); 1173 } 1174 1175 /// Given operands for an SRem, see if we can fold the result. 1176 /// If not, this returns null. 1177 static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1178 unsigned MaxRecurse) { 1179 // If the divisor is 0, the result is undefined, so assume the divisor is -1. 1180 // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0 1181 Value *X; 1182 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) 1183 return ConstantInt::getNullValue(Op0->getType()); 1184 1185 // If the two operands are negated, return 0. 
1186 if (isKnownNegation(Op0, Op1)) 1187 return ConstantInt::getNullValue(Op0->getType()); 1188 1189 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse); 1190 } 1191 1192 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1193 return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit); 1194 } 1195 1196 /// Given operands for a URem, see if we can fold the result. 1197 /// If not, this returns null. 1198 static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 1199 unsigned MaxRecurse) { 1200 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse); 1201 } 1202 1203 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 1204 return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit); 1205 } 1206 1207 /// Returns true if a shift by \c Amount always yields undef. 1208 static bool isUndefShift(Value *Amount) { 1209 Constant *C = dyn_cast<Constant>(Amount); 1210 if (!C) 1211 return false; 1212 1213 // X shift by undef -> undef because it may shift by the bitwidth. 1214 if (isa<UndefValue>(C)) 1215 return true; 1216 1217 // Shifting by the bitwidth or more is undefined. 1218 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) 1219 if (CI->getValue().getLimitedValue() >= 1220 CI->getType()->getScalarSizeInBits()) 1221 return true; 1222 1223 // If all lanes of a vector shift are undefined the whole shift is. 1224 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) { 1225 for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I) 1226 if (!isUndefShift(C->getAggregateElement(I))) 1227 return false; 1228 return true; 1229 } 1230 1231 return false; 1232 } 1233 1234 /// Given operands for an Shl, LShr or AShr, see if we can fold the result. 1235 /// If not, this returns null. 1236 static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0, 1237 Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse) { 1238 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q)) 1239 return C; 1240 1241 // 0 shift by X -> 0 1242 if (match(Op0, m_Zero())) 1243 return Constant::getNullValue(Op0->getType()); 1244 1245 // X shift by 0 -> X 1246 // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones 1247 // would be poison. 1248 Value *X; 1249 if (match(Op1, m_Zero()) || 1250 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))) 1251 return Op0; 1252 1253 // Fold undefined shifts. 1254 if (isUndefShift(Op1)) 1255 return UndefValue::get(Op0->getType()); 1256 1257 // If the operation is with the result of a select instruction, check whether 1258 // operating on either branch of the select always yields the same value. 1259 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 1260 if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse)) 1261 return V; 1262 1263 // If the operation is with the result of a phi instruction, check whether 1264 // operating on all incoming values of the phi always yields the same value. 1265 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 1266 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse)) 1267 return V; 1268 1269 // If any bits in the shift amount make that value greater than or equal to 1270 // the number of bits in the type, the shift is undefined. 
1271 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 1272 if (Known.One.getLimitedValue() >= Known.getBitWidth()) 1273 return UndefValue::get(Op0->getType()); 1274 1275 // If all valid bits in the shift amount are known zero, the first operand is 1276 // unchanged. 1277 unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth()); 1278 if (Known.countMinTrailingZeros() >= NumValidShiftBits) 1279 return Op0; 1280 1281 return nullptr; 1282 } 1283 1284 /// Given operands for an Shl, LShr or AShr, see if we can 1285 /// fold the result. If not, this returns null. 1286 static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, 1287 Value *Op1, bool isExact, const SimplifyQuery &Q, 1288 unsigned MaxRecurse) { 1289 if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse)) 1290 return V; 1291 1292 // X >> X -> 0 1293 if (Op0 == Op1) 1294 return Constant::getNullValue(Op0->getType()); 1295 1296 // undef >> X -> 0 1297 // undef >> X -> undef (if it's exact) 1298 if (match(Op0, m_Undef())) 1299 return isExact ? Op0 : Constant::getNullValue(Op0->getType()); 1300 1301 // The low bit cannot be shifted out of an exact shift if it is set. 1302 if (isExact) { 1303 KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT); 1304 if (Op0Known.One[0]) 1305 return Op0; 1306 } 1307 1308 return nullptr; 1309 } 1310 1311 /// Given operands for an Shl, see if we can fold the result. 1312 /// If not, this returns null. 1313 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 1314 const SimplifyQuery &Q, unsigned MaxRecurse) { 1315 if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse)) 1316 return V; 1317 1318 // undef << X -> 0 1319 // undef << X -> undef if (if it's NSW/NUW) 1320 if (match(Op0, m_Undef())) 1321 return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType()); 1322 1323 // (X >> A) << A -> X 1324 Value *X; 1325 if (Q.IIQ.UseInstrInfo && 1326 match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1))))) 1327 return X; 1328 1329 // shl nuw i8 C, %x -> C iff C has sign bit set. 1330 if (isNUW && match(Op0, m_Negative())) 1331 return Op0; 1332 // NOTE: could use computeKnownBits() / LazyValueInfo, 1333 // but the cost-benefit analysis suggests it isn't worth it. 1334 1335 return nullptr; 1336 } 1337 1338 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, 1339 const SimplifyQuery &Q) { 1340 return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit); 1341 } 1342 1343 /// Given operands for an LShr, see if we can fold the result. 1344 /// If not, this returns null. 1345 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, 1346 const SimplifyQuery &Q, unsigned MaxRecurse) { 1347 if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q, 1348 MaxRecurse)) 1349 return V; 1350 1351 // (X << A) >> A -> X 1352 Value *X; 1353 if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1)))) 1354 return X; 1355 1356 // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A. 1357 // We can return X as we do in the above case since OR alters no bits in X. 1358 // SimplifyDemandedBits in InstCombine can do more general optimization for 1359 // bit manipulation. This pattern aims to provide opportunities for other 1360 // optimizers by supporting a simple but common case in InstSimplify. 
1361 Value *Y; 1362 const APInt *ShRAmt, *ShLAmt; 1363 if (match(Op1, m_APInt(ShRAmt)) && 1364 match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) && 1365 *ShRAmt == *ShLAmt) { 1366 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 1367 const unsigned Width = Op0->getType()->getScalarSizeInBits(); 1368 const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros(); 1369 if (ShRAmt->uge(EffWidthY)) 1370 return X; 1371 } 1372 1373 return nullptr; 1374 } 1375 1376 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, 1377 const SimplifyQuery &Q) { 1378 return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit); 1379 } 1380 1381 /// Given operands for an AShr, see if we can fold the result. 1382 /// If not, this returns null. 1383 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, 1384 const SimplifyQuery &Q, unsigned MaxRecurse) { 1385 if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q, 1386 MaxRecurse)) 1387 return V; 1388 1389 // all ones >>a X -> -1 1390 // Do not return Op0 because it may contain undef elements if it's a vector. 1391 if (match(Op0, m_AllOnes())) 1392 return Constant::getAllOnesValue(Op0->getType()); 1393 1394 // (X << A) >> A -> X 1395 Value *X; 1396 if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1)))) 1397 return X; 1398 1399 // Arithmetic shifting an all-sign-bit value is a no-op. 1400 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 1401 if (NumSignBits == Op0->getType()->getScalarSizeInBits()) 1402 return Op0; 1403 1404 return nullptr; 1405 } 1406 1407 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, 1408 const SimplifyQuery &Q) { 1409 return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit); 1410 } 1411 1412 /// Commuted variants are assumed to be handled by calling this function again 1413 /// with the parameters swapped. 1414 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, 1415 ICmpInst *UnsignedICmp, bool IsAnd, 1416 const SimplifyQuery &Q) { 1417 Value *X, *Y; 1418 1419 ICmpInst::Predicate EqPred; 1420 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) || 1421 !ICmpInst::isEquality(EqPred)) 1422 return nullptr; 1423 1424 ICmpInst::Predicate UnsignedPred; 1425 1426 Value *A, *B; 1427 // Y = (A - B); 1428 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) { 1429 if (match(UnsignedICmp, 1430 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) && 1431 ICmpInst::isUnsigned(UnsignedPred)) { 1432 // A >=/<= B || (A - B) != 0 <--> true 1433 if ((UnsignedPred == ICmpInst::ICMP_UGE || 1434 UnsignedPred == ICmpInst::ICMP_ULE) && 1435 EqPred == ICmpInst::ICMP_NE && !IsAnd) 1436 return ConstantInt::getTrue(UnsignedICmp->getType()); 1437 // A </> B && (A - B) == 0 <--> false 1438 if ((UnsignedPred == ICmpInst::ICMP_ULT || 1439 UnsignedPred == ICmpInst::ICMP_UGT) && 1440 EqPred == ICmpInst::ICMP_EQ && IsAnd) 1441 return ConstantInt::getFalse(UnsignedICmp->getType()); 1442 1443 // A </> B && (A - B) != 0 <--> A </> B 1444 // A </> B || (A - B) != 0 <--> (A - B) != 0 1445 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT || 1446 UnsignedPred == ICmpInst::ICMP_UGT)) 1447 return IsAnd ? UnsignedICmp : ZeroICmp; 1448 1449 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0 1450 // A <=/>= B || (A - B) == 0 <--> A <=/>= B 1451 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE || 1452 UnsignedPred == ICmpInst::ICMP_UGE)) 1453 return IsAnd ? 
ZeroICmp : UnsignedICmp; 1454 } 1455 1456 // Given Y = (A - B) 1457 // Y >= A && Y != 0 --> Y >= A iff B != 0 1458 // Y < A || Y == 0 --> Y < A iff B != 0 1459 if (match(UnsignedICmp, 1460 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) { 1461 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd && 1462 EqPred == ICmpInst::ICMP_NE && 1463 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1464 return UnsignedICmp; 1465 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd && 1466 EqPred == ICmpInst::ICMP_EQ && 1467 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1468 return UnsignedICmp; 1469 } 1470 } 1471 1472 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) && 1473 ICmpInst::isUnsigned(UnsignedPred)) 1474 ; 1475 else if (match(UnsignedICmp, 1476 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) && 1477 ICmpInst::isUnsigned(UnsignedPred)) 1478 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred); 1479 else 1480 return nullptr; 1481 1482 // X < Y && Y != 0 --> X < Y 1483 // X < Y || Y != 0 --> Y != 0 1484 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE) 1485 return IsAnd ? UnsignedICmp : ZeroICmp; 1486 1487 // X <= Y && Y != 0 --> X <= Y iff X != 0 1488 // X <= Y || Y != 0 --> Y != 0 iff X != 0 1489 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE && 1490 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1491 return IsAnd ? UnsignedICmp : ZeroICmp; 1492 1493 // X >= Y && Y == 0 --> Y == 0 1494 // X >= Y || Y == 0 --> X >= Y 1495 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ) 1496 return IsAnd ? ZeroICmp : UnsignedICmp; 1497 1498 // X > Y && Y == 0 --> Y == 0 iff X != 0 1499 // X > Y || Y == 0 --> X > Y iff X != 0 1500 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ && 1501 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1502 return IsAnd ? ZeroICmp : UnsignedICmp; 1503 1504 // X < Y && Y == 0 --> false 1505 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ && 1506 IsAnd) 1507 return getFalse(UnsignedICmp->getType()); 1508 1509 // X >= Y || Y != 0 --> true 1510 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE && 1511 !IsAnd) 1512 return getTrue(UnsignedICmp->getType()); 1513 1514 return nullptr; 1515 } 1516 1517 /// Commuted variants are assumed to be handled by calling this function again 1518 /// with the parameters swapped. 1519 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { 1520 ICmpInst::Predicate Pred0, Pred1; 1521 Value *A ,*B; 1522 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || 1523 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) 1524 return nullptr; 1525 1526 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B). 1527 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we 1528 // can eliminate Op1 from this 'and'. 1529 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) 1530 return Op0; 1531 1532 // Check for any combination of predicates that are guaranteed to be disjoint. 
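  // For example (illustrative, with the same operands A and B as above):
  //   (icmp eq A, B)  & (icmp ne A, B)  --> false
  //   (icmp slt A, B) & (icmp sgt A, B) --> false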
1533 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || 1534 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) || 1535 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) || 1536 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)) 1537 return getFalse(Op0->getType()); 1538 1539 return nullptr; 1540 } 1541 1542 /// Commuted variants are assumed to be handled by calling this function again 1543 /// with the parameters swapped. 1544 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { 1545 ICmpInst::Predicate Pred0, Pred1; 1546 Value *A ,*B; 1547 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || 1548 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) 1549 return nullptr; 1550 1551 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B). 1552 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we 1553 // can eliminate Op0 from this 'or'. 1554 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) 1555 return Op1; 1556 1557 // Check for any combination of predicates that cover the entire range of 1558 // possibilities. 1559 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || 1560 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) || 1561 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) || 1562 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE)) 1563 return getTrue(Op0->getType()); 1564 1565 return nullptr; 1566 } 1567 1568 /// Test if a pair of compares with a shared operand and 2 constants has an 1569 /// empty set intersection, full set union, or if one compare is a superset of 1570 /// the other. 1571 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, 1572 bool IsAnd) { 1573 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)). 1574 if (Cmp0->getOperand(0) != Cmp1->getOperand(0)) 1575 return nullptr; 1576 1577 const APInt *C0, *C1; 1578 if (!match(Cmp0->getOperand(1), m_APInt(C0)) || 1579 !match(Cmp1->getOperand(1), m_APInt(C1))) 1580 return nullptr; 1581 1582 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0); 1583 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1); 1584 1585 // For and-of-compares, check if the intersection is empty: 1586 // (icmp X, C0) && (icmp X, C1) --> empty set --> false 1587 if (IsAnd && Range0.intersectWith(Range1).isEmptySet()) 1588 return getFalse(Cmp0->getType()); 1589 1590 // For or-of-compares, check if the union is full: 1591 // (icmp X, C0) || (icmp X, C1) --> full set --> true 1592 if (!IsAnd && Range0.unionWith(Range1).isFullSet()) 1593 return getTrue(Cmp0->getType()); 1594 1595 // Is one range a superset of the other? 1596 // If this is and-of-compares, take the smaller set: 1597 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42 1598 // If this is or-of-compares, take the larger set: 1599 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4 1600 if (Range0.contains(Range1)) 1601 return IsAnd ? Cmp1 : Cmp0; 1602 if (Range1.contains(Range0)) 1603 return IsAnd ? 
Cmp0 : Cmp1; 1604 1605 return nullptr; 1606 } 1607 1608 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1, 1609 bool IsAnd) { 1610 ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate(); 1611 if (!match(Cmp0->getOperand(1), m_Zero()) || 1612 !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1) 1613 return nullptr; 1614 1615 if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ)) 1616 return nullptr; 1617 1618 // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)". 1619 Value *X = Cmp0->getOperand(0); 1620 Value *Y = Cmp1->getOperand(0); 1621 1622 // If one of the compares is a masked version of a (not) null check, then 1623 // that compare implies the other, so we eliminate the other. Optionally, look 1624 // through a pointer-to-int cast to match a null check of a pointer type. 1625 1626 // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0 1627 // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0 1628 // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0 1629 // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0 1630 if (match(Y, m_c_And(m_Specific(X), m_Value())) || 1631 match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value()))) 1632 return Cmp1; 1633 1634 // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0 1635 // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0 1636 // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0 1637 // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0 1638 if (match(X, m_c_And(m_Specific(Y), m_Value())) || 1639 match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value()))) 1640 return Cmp0; 1641 1642 return nullptr; 1643 } 1644 1645 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, 1646 const InstrInfoQuery &IIQ) { 1647 // (icmp (add V, C0), C1) & (icmp V, C0) 1648 ICmpInst::Predicate Pred0, Pred1; 1649 const APInt *C0, *C1; 1650 Value *V; 1651 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1652 return nullptr; 1653 1654 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1655 return nullptr; 1656 1657 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0)); 1658 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1659 return nullptr; 1660 1661 Type *ITy = Op0->getType(); 1662 bool isNSW = IIQ.hasNoSignedWrap(AddInst); 1663 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); 1664 1665 const APInt Delta = *C1 - *C0; 1666 if (C0->isStrictlyPositive()) { 1667 if (Delta == 2) { 1668 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT) 1669 return getFalse(ITy); 1670 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1671 return getFalse(ITy); 1672 } 1673 if (Delta == 1) { 1674 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT) 1675 return getFalse(ITy); 1676 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1677 return getFalse(ITy); 1678 } 1679 } 1680 if (C0->getBoolValue() && isNUW) { 1681 if (Delta == 2) 1682 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT) 1683 return getFalse(ITy); 1684 if (Delta == 1) 1685 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT) 1686 return getFalse(ITy); 1687 } 1688 1689 return nullptr; 1690 } 1691 1692 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, 1693 const SimplifyQuery &Q) { 1694 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q)) 1695 return X; 1696 
if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q)) 1697 return X; 1698 1699 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1)) 1700 return X; 1701 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0)) 1702 return X; 1703 1704 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true)) 1705 return X; 1706 1707 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true)) 1708 return X; 1709 1710 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ)) 1711 return X; 1712 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ)) 1713 return X; 1714 1715 return nullptr; 1716 } 1717 1718 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, 1719 const InstrInfoQuery &IIQ) { 1720 // (icmp (add V, C0), C1) | (icmp V, C0) 1721 ICmpInst::Predicate Pred0, Pred1; 1722 const APInt *C0, *C1; 1723 Value *V; 1724 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1725 return nullptr; 1726 1727 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1728 return nullptr; 1729 1730 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); 1731 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1732 return nullptr; 1733 1734 Type *ITy = Op0->getType(); 1735 bool isNSW = IIQ.hasNoSignedWrap(AddInst); 1736 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); 1737 1738 const APInt Delta = *C1 - *C0; 1739 if (C0->isStrictlyPositive()) { 1740 if (Delta == 2) { 1741 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE) 1742 return getTrue(ITy); 1743 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1744 return getTrue(ITy); 1745 } 1746 if (Delta == 1) { 1747 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE) 1748 return getTrue(ITy); 1749 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1750 return getTrue(ITy); 1751 } 1752 } 1753 if (C0->getBoolValue() && isNUW) { 1754 if (Delta == 2) 1755 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE) 1756 return getTrue(ITy); 1757 if (Delta == 1) 1758 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE) 1759 return getTrue(ITy); 1760 } 1761 1762 return nullptr; 1763 } 1764 1765 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, 1766 const SimplifyQuery &Q) { 1767 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q)) 1768 return X; 1769 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q)) 1770 return X; 1771 1772 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1)) 1773 return X; 1774 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0)) 1775 return X; 1776 1777 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false)) 1778 return X; 1779 1780 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false)) 1781 return X; 1782 1783 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ)) 1784 return X; 1785 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ)) 1786 return X; 1787 1788 return nullptr; 1789 } 1790 1791 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI, 1792 FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) { 1793 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1); 1794 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1); 1795 if (LHS0->getType() != RHS0->getType()) 1796 return nullptr; 1797 1798 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 1799 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) || 1800 (PredL == FCmpInst::FCMP_UNO && 
PredR == FCmpInst::FCMP_UNO && !IsAnd)) { 1801 // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y 1802 // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X 1803 // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y 1804 // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X 1805 // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y 1806 // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X 1807 // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y 1808 // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X 1809 if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) || 1810 (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1))) 1811 return RHS; 1812 1813 // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y 1814 // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X 1815 // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y 1816 // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X 1817 // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y 1818 // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X 1819 // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y 1820 // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X 1821 if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) || 1822 (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1))) 1823 return LHS; 1824 } 1825 1826 return nullptr; 1827 } 1828 1829 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, 1830 Value *Op0, Value *Op1, bool IsAnd) { 1831 // Look through casts of the 'and' operands to find compares. 1832 auto *Cast0 = dyn_cast<CastInst>(Op0); 1833 auto *Cast1 = dyn_cast<CastInst>(Op1); 1834 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() && 1835 Cast0->getSrcTy() == Cast1->getSrcTy()) { 1836 Op0 = Cast0->getOperand(0); 1837 Op1 = Cast1->getOperand(0); 1838 } 1839 1840 Value *V = nullptr; 1841 auto *ICmp0 = dyn_cast<ICmpInst>(Op0); 1842 auto *ICmp1 = dyn_cast<ICmpInst>(Op1); 1843 if (ICmp0 && ICmp1) 1844 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q) 1845 : simplifyOrOfICmps(ICmp0, ICmp1, Q); 1846 1847 auto *FCmp0 = dyn_cast<FCmpInst>(Op0); 1848 auto *FCmp1 = dyn_cast<FCmpInst>(Op1); 1849 if (FCmp0 && FCmp1) 1850 V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd); 1851 1852 if (!V) 1853 return nullptr; 1854 if (!Cast0) 1855 return V; 1856 1857 // If we looked through casts, we can only handle a constant simplification 1858 // because we are not allowed to create a cast instruction here. 1859 if (auto *C = dyn_cast<Constant>(V)) 1860 return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType()); 1861 1862 return nullptr; 1863 } 1864 1865 /// Check that the Op1 is in expected form, i.e.: 1866 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???) 1867 /// %Op1 = extractvalue { i4, i1 } %Agg, 1 1868 static bool omitCheckForZeroBeforeMulWithOverflowInternal(Value *Op1, 1869 Value *X) { 1870 auto *Extract = dyn_cast<ExtractValueInst>(Op1); 1871 // We should only be extracting the overflow bit. 1872 if (!Extract || !Extract->getIndices().equals(1)) 1873 return false; 1874 Value *Agg = Extract->getAggregateOperand(); 1875 // This should be a multiplication-with-overflow intrinsic. 1876 if (!match(Agg, m_CombineOr(m_Intrinsic<Intrinsic::umul_with_overflow>(), 1877 m_Intrinsic<Intrinsic::smul_with_overflow>()))) 1878 return false; 1879 // One of its multipliers should be the value we checked for zero before. 
  if (!match(Agg, m_CombineOr(m_Argument<0>(m_Specific(X)),
                              m_Argument<1>(m_Specific(X)))))
    return false;
  return true;
}

/// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
/// other form of check, e.g. one that was using division; it may have been
/// guarded against division-by-zero. We can drop that check now.
/// Look for:
/// %Op0 = icmp ne i4 %X, 0
/// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
/// %Op1 = extractvalue { i4, i1 } %Agg, 1
/// %??? = and i1 %Op0, %Op1
/// We can just return %Op1
static Value *omitCheckForZeroBeforeMulWithOverflow(Value *Op0, Value *Op1) {
  ICmpInst::Predicate Pred;
  Value *X;
  if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
      Pred != ICmpInst::Predicate::ICMP_NE)
    return nullptr;
  // Is Op1 in expected form?
  if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
    return nullptr;
  // Can omit 'and', and just return the overflow bit.
  return Op1;
}

/// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
/// other form of check, e.g. one that was using division; it may have been
/// guarded against division-by-zero. We can drop that check now.
/// Look for:
/// %Op0 = icmp eq i4 %X, 0
/// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
/// %Op1 = extractvalue { i4, i1 } %Agg, 1
/// %NotOp1 = xor i1 %Op1, true
/// %or = or i1 %Op0, %NotOp1
/// We can just return %NotOp1
static Value *omitCheckForZeroBeforeInvertedMulWithOverflow(Value *Op0,
                                                            Value *NotOp1) {
  ICmpInst::Predicate Pred;
  Value *X;
  if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
      Pred != ICmpInst::Predicate::ICMP_EQ)
    return nullptr;
  // We expect the other hand of an 'or' to be a 'not'.
  Value *Op1;
  if (!match(NotOp1, m_Not(m_Value(Op1))))
    return nullptr;
  // Is Op1 in expected form?
  if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
    return nullptr;
  // Can omit 'or', and just return the inverted overflow bit.
  return NotOp1;
}

/// Given operands for an And, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
    return C;

  // X & undef -> 0
  if (match(Op1, m_Undef()))
    return Constant::getNullValue(Op0->getType());

  // X & X = X
  if (Op0 == Op1)
    return Op0;

  // X & 0 = 0
  if (match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X & -1 = X
  if (match(Op1, m_AllOnes()))
    return Op0;

  // A & ~A = ~A & A = 0
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getNullValue(Op0->getType());

  // (A | ?) & A = A
  if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
    return Op1;

  // A & (A | ?) = A
  if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
    return Op0;

  // A mask that only clears known zeros of a shifted value is a no-op.
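  // For example (constants picked for illustration only, i8 shown):
  //   and (shl X, 3), -8   --> shl X, 3    ; ~(-8) == 7 and 7 u>> 3 == 0
  //   and (lshr X, 2), 63  --> lshr X, 2   ; ~63 == 0xC0 and 0xC0 << 2 == 0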
1973 Value *X; 1974 const APInt *Mask; 1975 const APInt *ShAmt; 1976 if (match(Op1, m_APInt(Mask))) { 1977 // If all bits in the inverted and shifted mask are clear: 1978 // and (shl X, ShAmt), Mask --> shl X, ShAmt 1979 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) && 1980 (~(*Mask)).lshr(*ShAmt).isNullValue()) 1981 return Op0; 1982 1983 // If all bits in the inverted and shifted mask are clear: 1984 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt 1985 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) && 1986 (~(*Mask)).shl(*ShAmt).isNullValue()) 1987 return Op0; 1988 } 1989 1990 // If we have a multiplication overflow check that is being 'and'ed with a 1991 // check that one of the multipliers is not zero, we can omit the 'and', and 1992 // only keep the overflow check. 1993 if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op0, Op1)) 1994 return V; 1995 if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op1, Op0)) 1996 return V; 1997 1998 // A & (-A) = A if A is a power of two or zero. 1999 if (match(Op0, m_Neg(m_Specific(Op1))) || 2000 match(Op1, m_Neg(m_Specific(Op0)))) { 2001 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 2002 Q.DT)) 2003 return Op0; 2004 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 2005 Q.DT)) 2006 return Op1; 2007 } 2008 2009 // This is a similar pattern used for checking if a value is a power-of-2: 2010 // (A - 1) & A --> 0 (if A is a power-of-2 or 0) 2011 // A & (A - 1) --> 0 (if A is a power-of-2 or 0) 2012 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) && 2013 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) 2014 return Constant::getNullValue(Op1->getType()); 2015 if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) && 2016 isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) 2017 return Constant::getNullValue(Op0->getType()); 2018 2019 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true)) 2020 return V; 2021 2022 // Try some generic simplifications for associative operations. 2023 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, 2024 MaxRecurse)) 2025 return V; 2026 2027 // And distributes over Or. Try some generic simplifications based on this. 2028 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or, 2029 Q, MaxRecurse)) 2030 return V; 2031 2032 // And distributes over Xor. Try some generic simplifications based on this. 2033 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor, 2034 Q, MaxRecurse)) 2035 return V; 2036 2037 // If the operation is with the result of a select instruction, check whether 2038 // operating on either branch of the select always yields the same value. 2039 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 2040 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q, 2041 MaxRecurse)) 2042 return V; 2043 2044 // If the operation is with the result of a phi instruction, check whether 2045 // operating on all incoming values of the phi always yields the same value. 2046 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 2047 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q, 2048 MaxRecurse)) 2049 return V; 2050 2051 // Assuming the effective width of Y is not larger than A, i.e. all bits 2052 // from X and Y are disjoint in (X << A) | Y, 2053 // if the mask of this AND op covers all bits of X or Y, while it covers 2054 // no bits from the other, we can bypass this AND op. 
  // E.g.,
  // ((X << A) | Y) & Mask -> Y,
  //     if Mask = ((1 << effective_width_of(Y)) - 1)
  // ((X << A) | Y) & Mask -> X << A,
  //     if Mask = ((1 << effective_width_of(X)) - 1) << A
  // SimplifyDemandedBits in InstCombine can optimize the general case.
  // This pattern aims to help other passes for a common case.
  Value *Y, *XShifted;
  if (match(Op1, m_APInt(Mask)) &&
      match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
                                     m_Value(XShifted)),
                        m_Value(Y)))) {
    const unsigned Width = Op0->getType()->getScalarSizeInBits();
    const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
    const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
    if (EffWidthY <= ShftCnt) {
      const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
                                                Q.DT);
      const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros();
      const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
      const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
      // If the mask is extracting all bits from X or Y as is, we can skip
      // this AND op.
      if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
        return Y;
      if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
        return XShifted;
    }
  }

  return nullptr;
}

Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for an Or, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
    return C;

  // X | undef -> -1
  // X | -1 = -1
  // Do not return Op1 because it may contain undef elements if it's a vector.
  if (match(Op1, m_Undef()) || match(Op1, m_AllOnes()))
    return Constant::getAllOnesValue(Op0->getType());

  // X | X = X
  // X | 0 = X
  if (Op0 == Op1 || match(Op1, m_Zero()))
    return Op0;

  // A | ~A = ~A | A = -1
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Op0->getType());

  // (A & ?) | A = A
  if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
    return Op1;

  // A | (A & ?) = A
  if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
    return Op0;

  // ~(A & ?) | A = -1
  if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
    return Constant::getAllOnesValue(Op1->getType());

  // A | ~(A & ?) = -1
  if (match(Op1, m_Not(m_c_And(m_Specific(Op0), m_Value()))))
    return Constant::getAllOnesValue(Op0->getType());

  Value *A, *B;
  // (A & ~B) | (A ^ B) -> (A ^ B)
  // (~B & A) | (A ^ B) -> (A ^ B)
  // (A & ~B) | (B ^ A) -> (B ^ A)
  // (~B & A) | (B ^ A) -> (B ^ A)
  if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
      (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
       match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
    return Op1;

  // Commute the 'or' operands.
  // (A ^ B) | (A & ~B) -> (A ^ B)
  // (A ^ B) | (~B & A) -> (A ^ B)
  // (B ^ A) | (A & ~B) -> (B ^ A)
  // (B ^ A) | (~B & A) -> (B ^ A)
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
      (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
       match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
    return Op0;

  // (A & B) | (~A ^ B) -> (~A ^ B)
  // (B & A) | (~A ^ B) -> (~A ^ B)
  // (A & B) | (B ^ ~A) -> (B ^ ~A)
  // (B & A) | (B ^ ~A) -> (B ^ ~A)
  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
      (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
       match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
    return Op1;

  // (~A ^ B) | (A & B) -> (~A ^ B)
  // (~A ^ B) | (B & A) -> (~A ^ B)
  // (B ^ ~A) | (A & B) -> (B ^ ~A)
  // (B ^ ~A) | (B & A) -> (B ^ ~A)
  if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
      (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
       match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
    return Op0;

  if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
    return V;

  // If we have an inverted multiplication overflow check that is being 'or'ed
  // with a check that one of the multipliers is zero, we can omit the 'or',
  // and only keep the inverted overflow check.
  if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op0, Op1))
    return V;
  if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op1, Op0))
    return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Or distributes over And. Try some generic simplifications based on this.
  if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
                             MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
                                         MaxRecurse))
      return V;

  // (A & C1)|(B & C2)
  const APInt *C1, *C2;
  if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
      match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
    if (*C1 == ~*C2) {
      // (A & C1)|(B & C2)
      // If we have: ((V + N) & C1) | (V & C2)
      // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
      // replace with V+N.
      Value *N;
      if (C2->isMask() && // C2 == 0+1+
          match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
        // Add commutes, try both ways.
        if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
          return A;
      }
      // Or commutes, try both ways.
      if (C1->isMask() &&
          match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
        // Add commutes, try both ways.
        if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
          return B;
      }
    }
  }

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
2225 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 2226 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse)) 2227 return V; 2228 2229 return nullptr; 2230 } 2231 2232 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 2233 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit); 2234 } 2235 2236 /// Given operands for a Xor, see if we can fold the result. 2237 /// If not, this returns null. 2238 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 2239 unsigned MaxRecurse) { 2240 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q)) 2241 return C; 2242 2243 // A ^ undef -> undef 2244 if (match(Op1, m_Undef())) 2245 return Op1; 2246 2247 // A ^ 0 = A 2248 if (match(Op1, m_Zero())) 2249 return Op0; 2250 2251 // A ^ A = 0 2252 if (Op0 == Op1) 2253 return Constant::getNullValue(Op0->getType()); 2254 2255 // A ^ ~A = ~A ^ A = -1 2256 if (match(Op0, m_Not(m_Specific(Op1))) || 2257 match(Op1, m_Not(m_Specific(Op0)))) 2258 return Constant::getAllOnesValue(Op0->getType()); 2259 2260 // Try some generic simplifications for associative operations. 2261 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, 2262 MaxRecurse)) 2263 return V; 2264 2265 // Threading Xor over selects and phi nodes is pointless, so don't bother. 2266 // Threading over the select in "A ^ select(cond, B, C)" means evaluating 2267 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and 2268 // only if B and C are equal. If B and C are equal then (since we assume 2269 // that operands have already been simplified) "select(cond, B, C)" should 2270 // have been simplified to the common value of B and C already. Analysing 2271 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly 2272 // for threading over phi nodes. 2273 2274 return nullptr; 2275 } 2276 2277 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 2278 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit); 2279 } 2280 2281 2282 static Type *GetCompareTy(Value *Op) { 2283 return CmpInst::makeCmpResultType(Op->getType()); 2284 } 2285 2286 /// Rummage around inside V looking for something equivalent to the comparison 2287 /// "LHS Pred RHS". Return such a value if found, otherwise return null. 2288 /// Helper function for analyzing max/min idioms. 2289 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, 2290 Value *LHS, Value *RHS) { 2291 SelectInst *SI = dyn_cast<SelectInst>(V); 2292 if (!SI) 2293 return nullptr; 2294 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 2295 if (!Cmp) 2296 return nullptr; 2297 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1); 2298 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS) 2299 return Cmp; 2300 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) && 2301 LHS == CmpRHS && RHS == CmpLHS) 2302 return Cmp; 2303 return nullptr; 2304 } 2305 2306 // A significant optimization not implemented here is assuming that alloca 2307 // addresses are not equal to incoming argument values. They don't *alias*, 2308 // as we say, but that doesn't mean they aren't equal, so we take a 2309 // conservative approach. 2310 // 2311 // This is inspired in part by C++11 5.10p1: 2312 // "Two pointers of the same type compare equal if and only if they are both 2313 // null, both point to the same function, or both represent the same 2314 // address." 2315 // 2316 // This is pretty permissive. 
//
// It's also partly due to C11 6.5.9p6:
// "Two pointers compare equal if and only if both are null pointers, both are
//  pointers to the same object (including a pointer to an object and a
//  subobject at its beginning) or function, both are pointers to one past the
//  last element of the same array object, or one is a pointer to one past the
//  end of one array object and the other is a pointer to the start of a
//  different array object that happens to immediately follow the first array
//  object in the address space."
//
// C11's version is more restrictive, however there's no reason why an argument
// couldn't be a one-past-the-end value for a stack object in the caller and be
// equal to the beginning of a stack object in the callee.
//
// If the C and C++ standards are ever made sufficiently restrictive in this
// area, it may be possible to update LLVM's semantics accordingly and reinstate
// this optimization.
static Constant *
computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
                   const DominatorTree *DT, CmpInst::Predicate Pred,
                   AssumptionCache *AC, const Instruction *CxtI,
                   const InstrInfoQuery &IIQ, Value *LHS, Value *RHS) {
  // First, skip past any trivial no-ops.
  LHS = LHS->stripPointerCasts();
  RHS = RHS->stripPointerCasts();

  // A non-null pointer is not equal to a null pointer.
  if (llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
                           IIQ.UseInstrInfo) &&
      isa<ConstantPointerNull>(RHS) &&
      (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
    return ConstantInt::get(GetCompareTy(LHS),
                            !CmpInst::isTrueWhenEqual(Pred));

  // We can only fold certain predicates on pointer comparisons.
  switch (Pred) {
  default:
    return nullptr;

    // Equality comparisons are easy to fold.
  case CmpInst::ICMP_EQ:
  case CmpInst::ICMP_NE:
    break;

    // We can only handle unsigned relational comparisons because 'inbounds' on
    // a GEP only protects against unsigned wrapping.
  case CmpInst::ICMP_UGT:
  case CmpInst::ICMP_UGE:
  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    // However, we have to switch them to their signed variants to handle
    // negative indices from the base pointer.
    Pred = ICmpInst::getSignedPredicate(Pred);
    break;
  }

  // Strip off any constant offsets so that we can reason about them.
  // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
  // here and compare base addresses like AliasAnalysis does, however there are
  // numerous hazards. AliasAnalysis and its utilities rely on special rules
  // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
  // doesn't need to guarantee pointer inequality when it says NoAlias.
  Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are related via constant offsets to the same base
  // value, we can replace it with an icmp which just compares the offsets.
  if (LHS == RHS)
    return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);

  // Various optimizations for (in)equality comparisons.
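  // For instance (an assumed, illustrative case): an 'icmp eq' between a
  // pointer into a distinct non-empty alloca and a global variable's address
  // folds to false below, provided both offsets stay within their objects.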
  if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
    // Different non-empty allocations that exist at the same time have
    // different addresses (if the program can tell). Global variables always
    // exist, so they always exist during the lifetime of each other and all
    // allocas. Two different allocas usually have different addresses...
    //
    // However, if there's an @llvm.stackrestore dynamically in between two
    // allocas, they may have the same address. It's tempting to reduce the
    // scope of the problem by only looking at *static* allocas here. That would
    // cover the majority of allocas while significantly reducing the likelihood
    // of having an @llvm.stackrestore pop up in the middle. However, it's not
    // actually impossible for an @llvm.stackrestore to pop up in the middle of
    // an entry block. Also, if we have a block that's not attached to a
    // function, we can't tell if it's "static" under the current definition.
    // Theoretically, this problem could be fixed by creating a new kind of
    // instruction specifically for static allocas. Such a new instruction
    // could be required to be at the top of the entry block, thus preventing it
    // from being subject to a @llvm.stackrestore. Instcombine could even
    // convert regular allocas into these special allocas. It'd be nifty.
    // However, until then, this problem remains open.
    //
    // So, we'll assume that two non-empty allocas have different addresses
    // for now.
    //
    // With all that, if the offsets are within the bounds of their allocations
    // (and not one-past-the-end! so we can't use inbounds!), and their
    // allocations aren't the same, the pointers are not equal.
    //
    // Note that it's not necessary to check for LHS being a global variable
    // address, due to canonicalization and constant folding.
    if (isa<AllocaInst>(LHS) &&
        (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
      ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
      ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
      uint64_t LHSSize, RHSSize;
      ObjectSizeOpts Opts;
      Opts.NullIsUnknownSize =
          NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
      if (LHSOffsetCI && RHSOffsetCI &&
          getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
          getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
        const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
        const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
        if (!LHSOffsetValue.isNegative() &&
            !RHSOffsetValue.isNegative() &&
            LHSOffsetValue.ult(LHSSize) &&
            RHSOffsetValue.ult(RHSSize)) {
          return ConstantInt::get(GetCompareTy(LHS),
                                  !CmpInst::isTrueWhenEqual(Pred));
        }
      }

      // Repeat the above check but this time without depending on DataLayout
      // or being able to compute a precise size.
      if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
          !cast<PointerType>(RHS->getType())->isEmptyTy() &&
          LHSOffset->isNullValue() &&
          RHSOffset->isNullValue())
        return ConstantInt::get(GetCompareTy(LHS),
                                !CmpInst::isTrueWhenEqual(Pred));
    }

    // Even if a non-inbounds GEP occurs along the path we can still optimize
    // equality comparisons concerning the result. We avoid walking the whole
    // chain again by starting where the last calls to
    // stripAndComputeConstantOffsets left off and accumulate the offsets.
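    // For example (illustrative only): comparing a non-inbounds GEP that adds
    // a constant offset to %p against %p itself strips both sides down to %p
    // here, so the equality compare folds to a compare of the accumulated
    // constant offsets (e.g. 4 == 0 --> false).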
    Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
    Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
    if (LHS == RHS)
      return ConstantExpr::getICmp(Pred,
                                   ConstantExpr::getAdd(LHSOffset, LHSNoBound),
                                   ConstantExpr::getAdd(RHSOffset, RHSNoBound));

    // If one side of the equality comparison must come from a noalias call
    // (meaning a system memory allocation function), and the other side must
    // come from a pointer that cannot overlap with dynamically-allocated
    // memory within the lifetime of the current function (allocas, byval
    // arguments, globals), then determine the comparison result here.
    SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
    GetUnderlyingObjects(LHS, LHSUObjs, DL);
    GetUnderlyingObjects(RHS, RHSUObjs, DL);

    // Is the set of underlying objects all noalias calls?
    auto IsNAC = [](ArrayRef<const Value *> Objects) {
      return all_of(Objects, isNoAliasCall);
    };

    // Is the set of underlying objects all things which must be disjoint from
    // noalias calls. For allocas, we consider only static ones (dynamic
    // allocas might be transformed into calls to malloc not simultaneously
    // live with the compared-to allocation). For globals, we exclude symbols
    // that might be resolved lazily to symbols in another dynamically-loaded
    // library (and, thus, could be malloc'ed by the implementation).
    auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
      return all_of(Objects, [](const Value *V) {
        if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
          return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
        if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
          return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
                  GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
                 !GV->isThreadLocal();
        if (const Argument *A = dyn_cast<Argument>(V))
          return A->hasByValAttr();
        return false;
      });
    };

    if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
        (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
      return ConstantInt::get(GetCompareTy(LHS),
                              !CmpInst::isTrueWhenEqual(Pred));

    // Fold comparisons for non-escaping pointer even if the allocation call
    // cannot be elided. We cannot fold malloc comparison to null. Also, the
    // dynamic allocation call could be either of the operands.
    Value *MI = nullptr;
    if (isAllocLikeFn(LHS, TLI) &&
        llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
      MI = LHS;
    else if (isAllocLikeFn(RHS, TLI) &&
             llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
      MI = RHS;
    // FIXME: We should also fold the compare when the pointer escapes, but the
    // compare dominates the pointer escape
    if (MI && !PointerMayBeCaptured(MI, true, true))
      return ConstantInt::get(GetCompareTy(LHS),
                              CmpInst::isFalseWhenEqual(Pred));
  }

  // Otherwise, fail.
  return nullptr;
}

/// Fold an icmp when its operands have i1 scalar type.
static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q) {
  Type *ITy = GetCompareTy(LHS);  // The return type.
  Type *OpTy = LHS->getType();    // The operand type.
2526 if (!OpTy->isIntOrIntVectorTy(1)) 2527 return nullptr; 2528 2529 // A boolean compared to true/false can be simplified in 14 out of the 20 2530 // (10 predicates * 2 constants) possible combinations. Cases not handled here 2531 // require a 'not' of the LHS, so those must be transformed in InstCombine. 2532 if (match(RHS, m_Zero())) { 2533 switch (Pred) { 2534 case CmpInst::ICMP_NE: // X != 0 -> X 2535 case CmpInst::ICMP_UGT: // X >u 0 -> X 2536 case CmpInst::ICMP_SLT: // X <s 0 -> X 2537 return LHS; 2538 2539 case CmpInst::ICMP_ULT: // X <u 0 -> false 2540 case CmpInst::ICMP_SGT: // X >s 0 -> false 2541 return getFalse(ITy); 2542 2543 case CmpInst::ICMP_UGE: // X >=u 0 -> true 2544 case CmpInst::ICMP_SLE: // X <=s 0 -> true 2545 return getTrue(ITy); 2546 2547 default: break; 2548 } 2549 } else if (match(RHS, m_One())) { 2550 switch (Pred) { 2551 case CmpInst::ICMP_EQ: // X == 1 -> X 2552 case CmpInst::ICMP_UGE: // X >=u 1 -> X 2553 case CmpInst::ICMP_SLE: // X <=s -1 -> X 2554 return LHS; 2555 2556 case CmpInst::ICMP_UGT: // X >u 1 -> false 2557 case CmpInst::ICMP_SLT: // X <s -1 -> false 2558 return getFalse(ITy); 2559 2560 case CmpInst::ICMP_ULE: // X <=u 1 -> true 2561 case CmpInst::ICMP_SGE: // X >=s -1 -> true 2562 return getTrue(ITy); 2563 2564 default: break; 2565 } 2566 } 2567 2568 switch (Pred) { 2569 default: 2570 break; 2571 case ICmpInst::ICMP_UGE: 2572 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false)) 2573 return getTrue(ITy); 2574 break; 2575 case ICmpInst::ICMP_SGE: 2576 /// For signed comparison, the values for an i1 are 0 and -1 2577 /// respectively. This maps into a truth table of: 2578 /// LHS | RHS | LHS >=s RHS | LHS implies RHS 2579 /// 0 | 0 | 1 (0 >= 0) | 1 2580 /// 0 | 1 | 1 (0 >= -1) | 1 2581 /// 1 | 0 | 0 (-1 >= 0) | 0 2582 /// 1 | 1 | 1 (-1 >= -1) | 1 2583 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2584 return getTrue(ITy); 2585 break; 2586 case ICmpInst::ICMP_ULE: 2587 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2588 return getTrue(ITy); 2589 break; 2590 } 2591 2592 return nullptr; 2593 } 2594 2595 /// Try hard to fold icmp with zero RHS because this is a common case. 2596 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, 2597 Value *RHS, const SimplifyQuery &Q) { 2598 if (!match(RHS, m_Zero())) 2599 return nullptr; 2600 2601 Type *ITy = GetCompareTy(LHS); // The return type. 
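  // A few illustrative cases of the switch below (RHS is zero here):
  //   icmp ult X, 0 --> false      icmp uge X, 0 --> true
  //   icmp eq  X, 0 --> false      icmp ne  X, 0 --> true   (X known non-zero)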
2602 switch (Pred) { 2603 default: 2604 llvm_unreachable("Unknown ICmp predicate!"); 2605 case ICmpInst::ICMP_ULT: 2606 return getFalse(ITy); 2607 case ICmpInst::ICMP_UGE: 2608 return getTrue(ITy); 2609 case ICmpInst::ICMP_EQ: 2610 case ICmpInst::ICMP_ULE: 2611 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) 2612 return getFalse(ITy); 2613 break; 2614 case ICmpInst::ICMP_NE: 2615 case ICmpInst::ICMP_UGT: 2616 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) 2617 return getTrue(ITy); 2618 break; 2619 case ICmpInst::ICMP_SLT: { 2620 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2621 if (LHSKnown.isNegative()) 2622 return getTrue(ITy); 2623 if (LHSKnown.isNonNegative()) 2624 return getFalse(ITy); 2625 break; 2626 } 2627 case ICmpInst::ICMP_SLE: { 2628 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2629 if (LHSKnown.isNegative()) 2630 return getTrue(ITy); 2631 if (LHSKnown.isNonNegative() && 2632 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2633 return getFalse(ITy); 2634 break; 2635 } 2636 case ICmpInst::ICMP_SGE: { 2637 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2638 if (LHSKnown.isNegative()) 2639 return getFalse(ITy); 2640 if (LHSKnown.isNonNegative()) 2641 return getTrue(ITy); 2642 break; 2643 } 2644 case ICmpInst::ICMP_SGT: { 2645 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2646 if (LHSKnown.isNegative()) 2647 return getFalse(ITy); 2648 if (LHSKnown.isNonNegative() && 2649 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2650 return getTrue(ITy); 2651 break; 2652 } 2653 } 2654 2655 return nullptr; 2656 } 2657 2658 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS, 2659 Value *RHS, const InstrInfoQuery &IIQ) { 2660 Type *ITy = GetCompareTy(RHS); // The return type. 2661 2662 Value *X; 2663 // Sign-bit checks can be optimized to true/false after unsigned 2664 // floating-point casts: 2665 // icmp slt (bitcast (uitofp X)), 0 --> false 2666 // icmp sgt (bitcast (uitofp X)), -1 --> true 2667 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) { 2668 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero())) 2669 return ConstantInt::getFalse(ITy); 2670 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes())) 2671 return ConstantInt::getTrue(ITy); 2672 } 2673 2674 const APInt *C; 2675 if (!match(RHS, m_APInt(C))) 2676 return nullptr; 2677 2678 // Rule out tautological comparisons (eg., ult 0 or uge 0). 2679 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C); 2680 if (RHS_CR.isEmptySet()) 2681 return ConstantInt::getFalse(ITy); 2682 if (RHS_CR.isFullSet()) 2683 return ConstantInt::getTrue(ITy); 2684 2685 ConstantRange LHS_CR = computeConstantRange(LHS, IIQ.UseInstrInfo); 2686 if (!LHS_CR.isFullSet()) { 2687 if (RHS_CR.contains(LHS_CR)) 2688 return ConstantInt::getTrue(ITy); 2689 if (RHS_CR.inverse().contains(LHS_CR)) 2690 return ConstantInt::getFalse(ITy); 2691 } 2692 2693 return nullptr; 2694 } 2695 2696 /// TODO: A large part of this logic is duplicated in InstCombine's 2697 /// foldICmpBinOp(). We should be able to share that and avoid the code 2698 /// duplication. 2699 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS, 2700 Value *RHS, const SimplifyQuery &Q, 2701 unsigned MaxRecurse) { 2702 Type *ITy = GetCompareTy(LHS); // The return type. 
2703 2704 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS); 2705 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS); 2706 if (MaxRecurse && (LBO || RBO)) { 2707 // Analyze the case when either LHS or RHS is an add instruction. 2708 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 2709 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null). 2710 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false; 2711 if (LBO && LBO->getOpcode() == Instruction::Add) { 2712 A = LBO->getOperand(0); 2713 B = LBO->getOperand(1); 2714 NoLHSWrapProblem = 2715 ICmpInst::isEquality(Pred) || 2716 (CmpInst::isUnsigned(Pred) && 2717 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) || 2718 (CmpInst::isSigned(Pred) && 2719 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO))); 2720 } 2721 if (RBO && RBO->getOpcode() == Instruction::Add) { 2722 C = RBO->getOperand(0); 2723 D = RBO->getOperand(1); 2724 NoRHSWrapProblem = 2725 ICmpInst::isEquality(Pred) || 2726 (CmpInst::isUnsigned(Pred) && 2727 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) || 2728 (CmpInst::isSigned(Pred) && 2729 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO))); 2730 } 2731 2732 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 2733 if ((A == RHS || B == RHS) && NoLHSWrapProblem) 2734 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A, 2735 Constant::getNullValue(RHS->getType()), Q, 2736 MaxRecurse - 1)) 2737 return V; 2738 2739 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 2740 if ((C == LHS || D == LHS) && NoRHSWrapProblem) 2741 if (Value *V = 2742 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()), 2743 C == LHS ? D : C, Q, MaxRecurse - 1)) 2744 return V; 2745 2746 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow. 2747 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem && 2748 NoRHSWrapProblem) { 2749 // Determine Y and Z in the form icmp (X+Y), (X+Z). 2750 Value *Y, *Z; 2751 if (A == C) { 2752 // C + B == C + D -> B == D 2753 Y = B; 2754 Z = D; 2755 } else if (A == D) { 2756 // D + B == C + D -> B == C 2757 Y = B; 2758 Z = C; 2759 } else if (B == C) { 2760 // A + C == C + D -> A == D 2761 Y = A; 2762 Z = D; 2763 } else { 2764 assert(B == D); 2765 // A + D == C + D -> A == C 2766 Y = A; 2767 Z = C; 2768 } 2769 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1)) 2770 return V; 2771 } 2772 } 2773 2774 { 2775 Value *Y = nullptr; 2776 // icmp pred (or X, Y), X 2777 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) { 2778 if (Pred == ICmpInst::ICMP_ULT) 2779 return getFalse(ITy); 2780 if (Pred == ICmpInst::ICMP_UGE) 2781 return getTrue(ITy); 2782 2783 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) { 2784 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2785 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2786 if (RHSKnown.isNonNegative() && YKnown.isNegative()) 2787 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy); 2788 if (RHSKnown.isNegative() || YKnown.isNonNegative()) 2789 return Pred == ICmpInst::ICMP_SLT ? 
getFalse(ITy) : getTrue(ITy); 2790 } 2791 } 2792 // icmp pred X, (or X, Y) 2793 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) { 2794 if (Pred == ICmpInst::ICMP_ULE) 2795 return getTrue(ITy); 2796 if (Pred == ICmpInst::ICMP_UGT) 2797 return getFalse(ITy); 2798 2799 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) { 2800 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2801 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2802 if (LHSKnown.isNonNegative() && YKnown.isNegative()) 2803 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy); 2804 if (LHSKnown.isNegative() || YKnown.isNonNegative()) 2805 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy); 2806 } 2807 } 2808 } 2809 2810 // icmp pred (and X, Y), X 2811 if (LBO && match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) { 2812 if (Pred == ICmpInst::ICMP_UGT) 2813 return getFalse(ITy); 2814 if (Pred == ICmpInst::ICMP_ULE) 2815 return getTrue(ITy); 2816 } 2817 // icmp pred X, (and X, Y) 2818 if (RBO && match(RBO, m_c_And(m_Value(), m_Specific(LHS)))) { 2819 if (Pred == ICmpInst::ICMP_UGE) 2820 return getTrue(ITy); 2821 if (Pred == ICmpInst::ICMP_ULT) 2822 return getFalse(ITy); 2823 } 2824 2825 // 0 - (zext X) pred C 2826 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) { 2827 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) { 2828 if (RHSC->getValue().isStrictlyPositive()) { 2829 if (Pred == ICmpInst::ICMP_SLT) 2830 return ConstantInt::getTrue(RHSC->getContext()); 2831 if (Pred == ICmpInst::ICMP_SGE) 2832 return ConstantInt::getFalse(RHSC->getContext()); 2833 if (Pred == ICmpInst::ICMP_EQ) 2834 return ConstantInt::getFalse(RHSC->getContext()); 2835 if (Pred == ICmpInst::ICMP_NE) 2836 return ConstantInt::getTrue(RHSC->getContext()); 2837 } 2838 if (RHSC->getValue().isNonNegative()) { 2839 if (Pred == ICmpInst::ICMP_SLE) 2840 return ConstantInt::getTrue(RHSC->getContext()); 2841 if (Pred == ICmpInst::ICMP_SGT) 2842 return ConstantInt::getFalse(RHSC->getContext()); 2843 } 2844 } 2845 } 2846 2847 // icmp pred (urem X, Y), Y 2848 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) { 2849 switch (Pred) { 2850 default: 2851 break; 2852 case ICmpInst::ICMP_SGT: 2853 case ICmpInst::ICMP_SGE: { 2854 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2855 if (!Known.isNonNegative()) 2856 break; 2857 LLVM_FALLTHROUGH; 2858 } 2859 case ICmpInst::ICMP_EQ: 2860 case ICmpInst::ICMP_UGT: 2861 case ICmpInst::ICMP_UGE: 2862 return getFalse(ITy); 2863 case ICmpInst::ICMP_SLT: 2864 case ICmpInst::ICMP_SLE: { 2865 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2866 if (!Known.isNonNegative()) 2867 break; 2868 LLVM_FALLTHROUGH; 2869 } 2870 case ICmpInst::ICMP_NE: 2871 case ICmpInst::ICMP_ULT: 2872 case ICmpInst::ICMP_ULE: 2873 return getTrue(ITy); 2874 } 2875 } 2876 2877 // icmp pred X, (urem Y, X) 2878 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) { 2879 switch (Pred) { 2880 default: 2881 break; 2882 case ICmpInst::ICMP_SGT: 2883 case ICmpInst::ICMP_SGE: { 2884 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2885 if (!Known.isNonNegative()) 2886 break; 2887 LLVM_FALLTHROUGH; 2888 } 2889 case ICmpInst::ICMP_NE: 2890 case ICmpInst::ICMP_UGT: 2891 case ICmpInst::ICMP_UGE: 2892 return getTrue(ITy); 2893 case ICmpInst::ICMP_SLT: 2894 case ICmpInst::ICMP_SLE: { 2895 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2896 if (!Known.isNonNegative()) 
2897 break; 2898 LLVM_FALLTHROUGH; 2899 } 2900 case ICmpInst::ICMP_EQ: 2901 case ICmpInst::ICMP_ULT: 2902 case ICmpInst::ICMP_ULE: 2903 return getFalse(ITy); 2904 } 2905 } 2906 2907 // x >> y <=u x 2908 // x udiv y <=u x. 2909 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) || 2910 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) { 2911 // icmp pred (X op Y), X 2912 if (Pred == ICmpInst::ICMP_UGT) 2913 return getFalse(ITy); 2914 if (Pred == ICmpInst::ICMP_ULE) 2915 return getTrue(ITy); 2916 } 2917 2918 // x >=u x >> y 2919 // x >=u x udiv y. 2920 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) || 2921 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) { 2922 // icmp pred X, (X op Y) 2923 if (Pred == ICmpInst::ICMP_ULT) 2924 return getFalse(ITy); 2925 if (Pred == ICmpInst::ICMP_UGE) 2926 return getTrue(ITy); 2927 } 2928 2929 // handle: 2930 // CI2 << X == CI 2931 // CI2 << X != CI 2932 // 2933 // where CI2 is a power of 2 and CI isn't 2934 if (auto *CI = dyn_cast<ConstantInt>(RHS)) { 2935 const APInt *CI2Val, *CIVal = &CI->getValue(); 2936 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) && 2937 CI2Val->isPowerOf2()) { 2938 if (!CIVal->isPowerOf2()) { 2939 // CI2 << X can equal zero in some circumstances, 2940 // this simplification is unsafe if CI is zero. 2941 // 2942 // We know it is safe if: 2943 // - The shift is nsw, we can't shift out the one bit. 2944 // - The shift is nuw, we can't shift out the one bit. 2945 // - CI2 is one 2946 // - CI isn't zero 2947 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) || 2948 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) || 2949 CI2Val->isOneValue() || !CI->isZero()) { 2950 if (Pred == ICmpInst::ICMP_EQ) 2951 return ConstantInt::getFalse(RHS->getContext()); 2952 if (Pred == ICmpInst::ICMP_NE) 2953 return ConstantInt::getTrue(RHS->getContext()); 2954 } 2955 } 2956 if (CIVal->isSignMask() && CI2Val->isOneValue()) { 2957 if (Pred == ICmpInst::ICMP_UGT) 2958 return ConstantInt::getFalse(RHS->getContext()); 2959 if (Pred == ICmpInst::ICMP_ULE) 2960 return ConstantInt::getTrue(RHS->getContext()); 2961 } 2962 } 2963 } 2964 2965 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() && 2966 LBO->getOperand(1) == RBO->getOperand(1)) { 2967 switch (LBO->getOpcode()) { 2968 default: 2969 break; 2970 case Instruction::UDiv: 2971 case Instruction::LShr: 2972 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) || 2973 !Q.IIQ.isExact(RBO)) 2974 break; 2975 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2976 RBO->getOperand(0), Q, MaxRecurse - 1)) 2977 return V; 2978 break; 2979 case Instruction::SDiv: 2980 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) || 2981 !Q.IIQ.isExact(RBO)) 2982 break; 2983 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2984 RBO->getOperand(0), Q, MaxRecurse - 1)) 2985 return V; 2986 break; 2987 case Instruction::AShr: 2988 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO)) 2989 break; 2990 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2991 RBO->getOperand(0), Q, MaxRecurse - 1)) 2992 return V; 2993 break; 2994 case Instruction::Shl: { 2995 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO); 2996 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO); 2997 if (!NUW && !NSW) 2998 break; 2999 if (!NSW && ICmpInst::isSigned(Pred)) 3000 break; 3001 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 3002 RBO->getOperand(0), Q, MaxRecurse - 1)) 3003 return V; 3004 break; 3005 } 3006 } 3007 } 
3008 return nullptr; 3009 } 3010 3011 /// Simplify integer comparisons where at least one operand of the compare 3012 /// matches an integer min/max idiom. 3013 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS, 3014 Value *RHS, const SimplifyQuery &Q, 3015 unsigned MaxRecurse) { 3016 Type *ITy = GetCompareTy(LHS); // The return type. 3017 Value *A, *B; 3018 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE; 3019 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B". 3020 3021 // Signed variants on "max(a,b)>=a -> true". 3022 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 3023 if (A != RHS) 3024 std::swap(A, B); // smax(A, B) pred A. 3025 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 3026 // We analyze this as smax(A, B) pred A. 3027 P = Pred; 3028 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) && 3029 (A == LHS || B == LHS)) { 3030 if (A != LHS) 3031 std::swap(A, B); // A pred smax(A, B). 3032 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 3033 // We analyze this as smax(A, B) swapped-pred A. 3034 P = CmpInst::getSwappedPredicate(Pred); 3035 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 3036 (A == RHS || B == RHS)) { 3037 if (A != RHS) 3038 std::swap(A, B); // smin(A, B) pred A. 3039 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 3040 // We analyze this as smax(-A, -B) swapped-pred -A. 3041 // Note that we do not need to actually form -A or -B thanks to EqP. 3042 P = CmpInst::getSwappedPredicate(Pred); 3043 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) && 3044 (A == LHS || B == LHS)) { 3045 if (A != LHS) 3046 std::swap(A, B); // A pred smin(A, B). 3047 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 3048 // We analyze this as smax(-A, -B) pred -A. 3049 // Note that we do not need to actually form -A or -B thanks to EqP. 3050 P = Pred; 3051 } 3052 if (P != CmpInst::BAD_ICMP_PREDICATE) { 3053 // Cases correspond to "max(A, B) p A". 3054 switch (P) { 3055 default: 3056 break; 3057 case CmpInst::ICMP_EQ: 3058 case CmpInst::ICMP_SLE: 3059 // Equivalent to "A EqP B". This may be the same as the condition tested 3060 // in the max/min; if so, we can just return that. 3061 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 3062 return V; 3063 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 3064 return V; 3065 // Otherwise, see if "A EqP B" simplifies. 3066 if (MaxRecurse) 3067 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 3068 return V; 3069 break; 3070 case CmpInst::ICMP_NE: 3071 case CmpInst::ICMP_SGT: { 3072 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 3073 // Equivalent to "A InvEqP B". This may be the same as the condition 3074 // tested in the max/min; if so, we can just return that. 3075 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 3076 return V; 3077 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 3078 return V; 3079 // Otherwise, see if "A InvEqP B" simplifies. 3080 if (MaxRecurse) 3081 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 3082 return V; 3083 break; 3084 } 3085 case CmpInst::ICMP_SGE: 3086 // Always true. 3087 return getTrue(ITy); 3088 case CmpInst::ICMP_SLT: 3089 // Always false. 3090 return getFalse(ITy); 3091 } 3092 } 3093 3094 // Unsigned variants on "max(a,b)>=a -> true". 
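  // Illustrative example of the unsigned folds below (hypothetical IR, not
  // taken from a test; assumes %m is recognized as umax(%a, %b) by the
  // min/max matchers):
  //   %c = icmp ugt i32 %a, %b
  //   %m = select i1 %c, i32 %a, i32 %b   ; umax(%a, %b)
  //   icmp uge i32 %m, %a                 ; --> true
  //   icmp ult i32 %m, %a                 ; --> false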
3095 P = CmpInst::BAD_ICMP_PREDICATE; 3096 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 3097 if (A != RHS) 3098 std::swap(A, B); // umax(A, B) pred A. 3099 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 3100 // We analyze this as umax(A, B) pred A. 3101 P = Pred; 3102 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) && 3103 (A == LHS || B == LHS)) { 3104 if (A != LHS) 3105 std::swap(A, B); // A pred umax(A, B). 3106 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 3107 // We analyze this as umax(A, B) swapped-pred A. 3108 P = CmpInst::getSwappedPredicate(Pred); 3109 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 3110 (A == RHS || B == RHS)) { 3111 if (A != RHS) 3112 std::swap(A, B); // umin(A, B) pred A. 3113 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 3114 // We analyze this as umax(-A, -B) swapped-pred -A. 3115 // Note that we do not need to actually form -A or -B thanks to EqP. 3116 P = CmpInst::getSwappedPredicate(Pred); 3117 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) && 3118 (A == LHS || B == LHS)) { 3119 if (A != LHS) 3120 std::swap(A, B); // A pred umin(A, B). 3121 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 3122 // We analyze this as umax(-A, -B) pred -A. 3123 // Note that we do not need to actually form -A or -B thanks to EqP. 3124 P = Pred; 3125 } 3126 if (P != CmpInst::BAD_ICMP_PREDICATE) { 3127 // Cases correspond to "max(A, B) p A". 3128 switch (P) { 3129 default: 3130 break; 3131 case CmpInst::ICMP_EQ: 3132 case CmpInst::ICMP_ULE: 3133 // Equivalent to "A EqP B". This may be the same as the condition tested 3134 // in the max/min; if so, we can just return that. 3135 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 3136 return V; 3137 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 3138 return V; 3139 // Otherwise, see if "A EqP B" simplifies. 3140 if (MaxRecurse) 3141 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 3142 return V; 3143 break; 3144 case CmpInst::ICMP_NE: 3145 case CmpInst::ICMP_UGT: { 3146 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 3147 // Equivalent to "A InvEqP B". This may be the same as the condition 3148 // tested in the max/min; if so, we can just return that. 3149 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 3150 return V; 3151 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 3152 return V; 3153 // Otherwise, see if "A InvEqP B" simplifies. 3154 if (MaxRecurse) 3155 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 3156 return V; 3157 break; 3158 } 3159 case CmpInst::ICMP_UGE: 3160 // Always true. 3161 return getTrue(ITy); 3162 case CmpInst::ICMP_ULT: 3163 // Always false. 3164 return getFalse(ITy); 3165 } 3166 } 3167 3168 // Variants on "max(x,y) >= min(x,z)". 3169 Value *C, *D; 3170 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && 3171 match(RHS, m_SMin(m_Value(C), m_Value(D))) && 3172 (A == C || A == D || B == C || B == D)) { 3173 // max(x, ?) pred min(x, ?). 3174 if (Pred == CmpInst::ICMP_SGE) 3175 // Always true. 3176 return getTrue(ITy); 3177 if (Pred == CmpInst::ICMP_SLT) 3178 // Always false. 3179 return getFalse(ITy); 3180 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 3181 match(RHS, m_SMax(m_Value(C), m_Value(D))) && 3182 (A == C || A == D || B == C || B == D)) { 3183 // min(x, ?) pred max(x, ?). 3184 if (Pred == CmpInst::ICMP_SLE) 3185 // Always true. 
3186 return getTrue(ITy); 3187 if (Pred == CmpInst::ICMP_SGT) 3188 // Always false. 3189 return getFalse(ITy); 3190 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && 3191 match(RHS, m_UMin(m_Value(C), m_Value(D))) && 3192 (A == C || A == D || B == C || B == D)) { 3193 // max(x, ?) pred min(x, ?). 3194 if (Pred == CmpInst::ICMP_UGE) 3195 // Always true. 3196 return getTrue(ITy); 3197 if (Pred == CmpInst::ICMP_ULT) 3198 // Always false. 3199 return getFalse(ITy); 3200 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 3201 match(RHS, m_UMax(m_Value(C), m_Value(D))) && 3202 (A == C || A == D || B == C || B == D)) { 3203 // min(x, ?) pred max(x, ?). 3204 if (Pred == CmpInst::ICMP_ULE) 3205 // Always true. 3206 return getTrue(ITy); 3207 if (Pred == CmpInst::ICMP_UGT) 3208 // Always false. 3209 return getFalse(ITy); 3210 } 3211 3212 return nullptr; 3213 } 3214 3215 /// Given operands for an ICmpInst, see if we can fold the result. 3216 /// If not, this returns null. 3217 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3218 const SimplifyQuery &Q, unsigned MaxRecurse) { 3219 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3220 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!"); 3221 3222 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3223 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3224 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3225 3226 // If we have a constant, make sure it is on the RHS. 3227 std::swap(LHS, RHS); 3228 Pred = CmpInst::getSwappedPredicate(Pred); 3229 } 3230 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X"); 3231 3232 Type *ITy = GetCompareTy(LHS); // The return type. 3233 3234 // For EQ and NE, we can always pick a value for the undef to make the 3235 // predicate pass or fail, so we can return undef. 3236 // Matches behavior in llvm::ConstantFoldCompareInstruction. 3237 if (isa<UndefValue>(RHS) && ICmpInst::isEquality(Pred)) 3238 return UndefValue::get(ITy); 3239 3240 // icmp X, X -> true/false 3241 // icmp X, undef -> true/false because undef could be X. 3242 if (LHS == RHS || isa<UndefValue>(RHS)) 3243 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred)); 3244 3245 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q)) 3246 return V; 3247 3248 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q)) 3249 return V; 3250 3251 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ)) 3252 return V; 3253 3254 // If both operands have range metadata, use the metadata 3255 // to simplify the comparison. 
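  // For example (hypothetical !range values):
  //   %a = load i32, i32* %p, !range !{i32 0, i32 10}
  //   %b = load i32, i32* %q, !range !{i32 20, i32 30}
  //   icmp ult i32 %a, %b   ; --> true, since [0,10) lies entirely below [20,30)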
3256 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) { 3257 auto RHS_Instr = cast<Instruction>(RHS); 3258 auto LHS_Instr = cast<Instruction>(LHS); 3259 3260 if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) && 3261 Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) { 3262 auto RHS_CR = getConstantRangeFromMetadata( 3263 *RHS_Instr->getMetadata(LLVMContext::MD_range)); 3264 auto LHS_CR = getConstantRangeFromMetadata( 3265 *LHS_Instr->getMetadata(LLVMContext::MD_range)); 3266 3267 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR); 3268 if (Satisfied_CR.contains(LHS_CR)) 3269 return ConstantInt::getTrue(RHS->getContext()); 3270 3271 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion( 3272 CmpInst::getInversePredicate(Pred), RHS_CR); 3273 if (InversedSatisfied_CR.contains(LHS_CR)) 3274 return ConstantInt::getFalse(RHS->getContext()); 3275 } 3276 } 3277 3278 // Compare of cast, for example (zext X) != 0 -> X != 0 3279 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) { 3280 Instruction *LI = cast<CastInst>(LHS); 3281 Value *SrcOp = LI->getOperand(0); 3282 Type *SrcTy = SrcOp->getType(); 3283 Type *DstTy = LI->getType(); 3284 3285 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input 3286 // if the integer type is the same size as the pointer type. 3287 if (MaxRecurse && isa<PtrToIntInst>(LI) && 3288 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) { 3289 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 3290 // Transfer the cast to the constant. 3291 if (Value *V = SimplifyICmpInst(Pred, SrcOp, 3292 ConstantExpr::getIntToPtr(RHSC, SrcTy), 3293 Q, MaxRecurse-1)) 3294 return V; 3295 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) { 3296 if (RI->getOperand(0)->getType() == SrcTy) 3297 // Compare without the cast. 3298 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0), 3299 Q, MaxRecurse-1)) 3300 return V; 3301 } 3302 } 3303 3304 if (isa<ZExtInst>(LHS)) { 3305 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the 3306 // same type. 3307 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) { 3308 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType()) 3309 // Compare X and Y. Note that signed predicates become unsigned. 3310 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 3311 SrcOp, RI->getOperand(0), Q, 3312 MaxRecurse-1)) 3313 return V; 3314 } 3315 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended 3316 // too. If not, then try to deduce the result of the comparison. 3317 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 3318 // Compute the constant that would happen if we truncated to SrcTy then 3319 // reextended to DstTy. 3320 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy); 3321 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy); 3322 3323 // If the re-extended constant didn't change then this is effectively 3324 // also a case of comparing two zero-extended values. 3325 if (RExt == CI && MaxRecurse) 3326 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 3327 SrcOp, Trunc, Q, MaxRecurse-1)) 3328 return V; 3329 3330 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit 3331 // there. Use this to work out the result of the comparison. 3332 if (RExt != CI) { 3333 switch (Pred) { 3334 default: llvm_unreachable("Unknown ICmp predicate!"); 3335 // LHS <u RHS. 
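          // For example (illustrative): in "icmp ult i32 (zext i8 %x to i32), 300"
          // the round-tripped constant is 44 (300 truncated to i8, zero-extended
          // back), so RExt != CI; the zero-extended %x can never reach 300, hence
          // ne/ult/ule fold to true and eq/ugt/uge fold to false.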
          case ICmpInst::ICMP_EQ:
          case ICmpInst::ICMP_UGT:
          case ICmpInst::ICMP_UGE:
            return ConstantInt::getFalse(CI->getContext());

          case ICmpInst::ICMP_NE:
          case ICmpInst::ICMP_ULT:
          case ICmpInst::ICMP_ULE:
            return ConstantInt::getTrue(CI->getContext());

          // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
          // is non-negative then LHS <s RHS.
          case ICmpInst::ICMP_SGT:
          case ICmpInst::ICMP_SGE:
            return CI->getValue().isNegative() ?
              ConstantInt::getTrue(CI->getContext()) :
              ConstantInt::getFalse(CI->getContext());

          case ICmpInst::ICMP_SLT:
          case ICmpInst::ICMP_SLE:
            return CI->getValue().isNegative() ?
              ConstantInt::getFalse(CI->getContext()) :
              ConstantInt::getTrue(CI->getContext());
          }
        }
      }
    }

    if (isa<SExtInst>(LHS)) {
      // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
      // same type.
      if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
        if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
          // Compare X and Y. Note that the predicate does not change.
          if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
                                          Q, MaxRecurse-1))
            return V;
      }
      // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
      // too. If not, then try to deduce the result of the comparison.
      else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
        // Compute the constant that would happen if we truncated to SrcTy then
        // reextended to DstTy.
        Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
        Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);

        // If the re-extended constant didn't change then this is effectively
        // also a case of comparing two sign-extended values.
        if (RExt == CI && MaxRecurse)
          if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
            return V;

        // Otherwise the upper bits of LHS are all equal, while RHS has varying
        // bits there. Use this to work out the result of the comparison.
        if (RExt != CI) {
          switch (Pred) {
          default: llvm_unreachable("Unknown ICmp predicate!");
          case ICmpInst::ICMP_EQ:
            return ConstantInt::getFalse(CI->getContext());
          case ICmpInst::ICMP_NE:
            return ConstantInt::getTrue(CI->getContext());

          // If RHS is non-negative then LHS <s RHS. If RHS is negative then
          // LHS >s RHS.
          case ICmpInst::ICMP_SGT:
          case ICmpInst::ICMP_SGE:
            return CI->getValue().isNegative() ?
              ConstantInt::getTrue(CI->getContext()) :
              ConstantInt::getFalse(CI->getContext());
          case ICmpInst::ICMP_SLT:
          case ICmpInst::ICMP_SLE:
            return CI->getValue().isNegative() ?
              ConstantInt::getFalse(CI->getContext()) :
              ConstantInt::getTrue(CI->getContext());

          // If LHS is non-negative then LHS <u RHS. If LHS is negative then
          // LHS >u RHS.
          case ICmpInst::ICMP_UGT:
          case ICmpInst::ICMP_UGE:
            // Comparison is true iff the LHS <s 0.
            if (MaxRecurse)
              if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
                                              Constant::getNullValue(SrcTy),
                                              Q, MaxRecurse-1))
                return V;
            break;
          case ICmpInst::ICMP_ULT:
          case ICmpInst::ICMP_ULE:
            // Comparison is true iff the LHS >=s 0.
3425 if (MaxRecurse) 3426 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp, 3427 Constant::getNullValue(SrcTy), 3428 Q, MaxRecurse-1)) 3429 return V; 3430 break; 3431 } 3432 } 3433 } 3434 } 3435 } 3436 3437 // icmp eq|ne X, Y -> false|true if X != Y 3438 if (ICmpInst::isEquality(Pred) && 3439 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) { 3440 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy); 3441 } 3442 3443 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse)) 3444 return V; 3445 3446 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse)) 3447 return V; 3448 3449 // Simplify comparisons of related pointers using a powerful, recursive 3450 // GEP-walk when we have target data available.. 3451 if (LHS->getType()->isPointerTy()) 3452 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, 3453 Q.IIQ, LHS, RHS)) 3454 return C; 3455 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS)) 3456 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS)) 3457 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) == 3458 Q.DL.getTypeSizeInBits(CLHS->getType()) && 3459 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) == 3460 Q.DL.getTypeSizeInBits(CRHS->getType())) 3461 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, 3462 Q.IIQ, CLHS->getPointerOperand(), 3463 CRHS->getPointerOperand())) 3464 return C; 3465 3466 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) { 3467 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) { 3468 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() && 3469 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() && 3470 (ICmpInst::isEquality(Pred) || 3471 (GLHS->isInBounds() && GRHS->isInBounds() && 3472 Pred == ICmpInst::getSignedPredicate(Pred)))) { 3473 // The bases are equal and the indices are constant. Build a constant 3474 // expression GEP with the same indices and a null base pointer to see 3475 // what constant folding can make out of it. 3476 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType()); 3477 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end()); 3478 Constant *NewLHS = ConstantExpr::getGetElementPtr( 3479 GLHS->getSourceElementType(), Null, IndicesLHS); 3480 3481 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end()); 3482 Constant *NewRHS = ConstantExpr::getGetElementPtr( 3483 GLHS->getSourceElementType(), Null, IndicesRHS); 3484 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS); 3485 } 3486 } 3487 } 3488 3489 // If the comparison is with the result of a select instruction, check whether 3490 // comparing with either branch of the select always yields the same value. 3491 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3492 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3493 return V; 3494 3495 // If the comparison is with the result of a phi instruction, check whether 3496 // doing the compare with each incoming phi value yields a common result. 3497 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 3498 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 3499 return V; 3500 3501 return nullptr; 3502 } 3503 3504 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3505 const SimplifyQuery &Q) { 3506 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit); 3507 } 3508 3509 /// Given operands for an FCmpInst, see if we can fold the result. 3510 /// If not, this returns null. 
3511 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3512 FastMathFlags FMF, const SimplifyQuery &Q, 3513 unsigned MaxRecurse) { 3514 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3515 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!"); 3516 3517 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3518 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3519 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3520 3521 // If we have a constant, make sure it is on the RHS. 3522 std::swap(LHS, RHS); 3523 Pred = CmpInst::getSwappedPredicate(Pred); 3524 } 3525 3526 // Fold trivial predicates. 3527 Type *RetTy = GetCompareTy(LHS); 3528 if (Pred == FCmpInst::FCMP_FALSE) 3529 return getFalse(RetTy); 3530 if (Pred == FCmpInst::FCMP_TRUE) 3531 return getTrue(RetTy); 3532 3533 // Fold (un)ordered comparison if we can determine there are no NaNs. 3534 if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD) 3535 if (FMF.noNaNs() || 3536 (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI))) 3537 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD); 3538 3539 // NaN is unordered; NaN is not ordered. 3540 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) && 3541 "Comparison must be either ordered or unordered"); 3542 if (match(RHS, m_NaN())) 3543 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred)); 3544 3545 // fcmp pred x, undef and fcmp pred undef, x 3546 // fold to true if unordered, false if ordered 3547 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) { 3548 // Choosing NaN for the undef will always make unordered comparison succeed 3549 // and ordered comparison fail. 3550 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred)); 3551 } 3552 3553 // fcmp x,x -> true/false. Not all compares are foldable. 3554 if (LHS == RHS) { 3555 if (CmpInst::isTrueWhenEqual(Pred)) 3556 return getTrue(RetTy); 3557 if (CmpInst::isFalseWhenEqual(Pred)) 3558 return getFalse(RetTy); 3559 } 3560 3561 // Handle fcmp with constant RHS. 3562 // TODO: Use match with a specific FP value, so these work with vectors with 3563 // undef lanes. 3564 const APFloat *C; 3565 if (match(RHS, m_APFloat(C))) { 3566 // Check whether the constant is an infinity. 3567 if (C->isInfinity()) { 3568 if (C->isNegative()) { 3569 switch (Pred) { 3570 case FCmpInst::FCMP_OLT: 3571 // No value is ordered and less than negative infinity. 3572 return getFalse(RetTy); 3573 case FCmpInst::FCMP_UGE: 3574 // All values are unordered with or at least negative infinity. 3575 return getTrue(RetTy); 3576 default: 3577 break; 3578 } 3579 } else { 3580 switch (Pred) { 3581 case FCmpInst::FCMP_OGT: 3582 // No value is ordered and greater than infinity. 3583 return getFalse(RetTy); 3584 case FCmpInst::FCMP_ULE: 3585 // All values are unordered with and at most infinity. 3586 return getTrue(RetTy); 3587 default: 3588 break; 3589 } 3590 } 3591 } 3592 if (C->isNegative() && !C->isNegZero()) { 3593 assert(!C->isNaN() && "Unexpected NaN constant!"); 3594 // TODO: We can catch more cases by using a range check rather than 3595 // relying on CannotBeOrderedLessThanZero. 
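      // For example (illustrative, assuming no earlier fold applies):
      //   %a = call float @llvm.fabs.f32(float %x)
      //   fcmp ugt float %a, -1.0   ; --> true  (%a is never ordered-less-than zero)
      //   fcmp olt float %a, -1.0   ; --> false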
3596 switch (Pred) { 3597 case FCmpInst::FCMP_UGE: 3598 case FCmpInst::FCMP_UGT: 3599 case FCmpInst::FCMP_UNE: 3600 // (X >= 0) implies (X > C) when (C < 0) 3601 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3602 return getTrue(RetTy); 3603 break; 3604 case FCmpInst::FCMP_OEQ: 3605 case FCmpInst::FCMP_OLE: 3606 case FCmpInst::FCMP_OLT: 3607 // (X >= 0) implies !(X < C) when (C < 0) 3608 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3609 return getFalse(RetTy); 3610 break; 3611 default: 3612 break; 3613 } 3614 } 3615 3616 // Check comparison of [minnum/maxnum with constant] with other constant. 3617 const APFloat *C2; 3618 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) && 3619 C2->compare(*C) == APFloat::cmpLessThan) || 3620 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) && 3621 C2->compare(*C) == APFloat::cmpGreaterThan)) { 3622 bool IsMaxNum = 3623 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum; 3624 // The ordered relationship and minnum/maxnum guarantee that we do not 3625 // have NaN constants, so ordered/unordered preds are handled the same. 3626 switch (Pred) { 3627 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_UEQ: 3628 // minnum(X, LesserC) == C --> false 3629 // maxnum(X, GreaterC) == C --> false 3630 return getFalse(RetTy); 3631 case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_UNE: 3632 // minnum(X, LesserC) != C --> true 3633 // maxnum(X, GreaterC) != C --> true 3634 return getTrue(RetTy); 3635 case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_UGE: 3636 case FCmpInst::FCMP_OGT: case FCmpInst::FCMP_UGT: 3637 // minnum(X, LesserC) >= C --> false 3638 // minnum(X, LesserC) > C --> false 3639 // maxnum(X, GreaterC) >= C --> true 3640 // maxnum(X, GreaterC) > C --> true 3641 return ConstantInt::get(RetTy, IsMaxNum); 3642 case FCmpInst::FCMP_OLE: case FCmpInst::FCMP_ULE: 3643 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_ULT: 3644 // minnum(X, LesserC) <= C --> true 3645 // minnum(X, LesserC) < C --> true 3646 // maxnum(X, GreaterC) <= C --> false 3647 // maxnum(X, GreaterC) < C --> false 3648 return ConstantInt::get(RetTy, !IsMaxNum); 3649 default: 3650 // TRUE/FALSE/ORD/UNO should be handled before this. 3651 llvm_unreachable("Unexpected fcmp predicate"); 3652 } 3653 } 3654 } 3655 3656 if (match(RHS, m_AnyZeroFP())) { 3657 switch (Pred) { 3658 case FCmpInst::FCMP_OGE: 3659 case FCmpInst::FCMP_ULT: 3660 // Positive or zero X >= 0.0 --> true 3661 // Positive or zero X < 0.0 --> false 3662 if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) && 3663 CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3664 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy); 3665 break; 3666 case FCmpInst::FCMP_UGE: 3667 case FCmpInst::FCMP_OLT: 3668 // Positive or zero or nan X >= 0.0 --> true 3669 // Positive or zero or nan X < 0.0 --> false 3670 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3671 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy); 3672 break; 3673 default: 3674 break; 3675 } 3676 } 3677 3678 // If the comparison is with the result of a select instruction, check whether 3679 // comparing with either branch of the select always yields the same value. 3680 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3681 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3682 return V; 3683 3684 // If the comparison is with the result of a phi instruction, check whether 3685 // doing the compare with each incoming phi value yields a common result. 
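  // For example (illustrative):
  //   %p = phi float [ 1.0, %bb0 ], [ 2.0, %bb1 ]
  //   fcmp ogt float %p, 0.0   ; --> true, since both incoming values compare true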
3686 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 3687 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 3688 return V; 3689 3690 return nullptr; 3691 } 3692 3693 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3694 FastMathFlags FMF, const SimplifyQuery &Q) { 3695 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit); 3696 } 3697 3698 /// See if V simplifies when its operand Op is replaced with RepOp. 3699 static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, 3700 const SimplifyQuery &Q, 3701 unsigned MaxRecurse) { 3702 // Trivial replacement. 3703 if (V == Op) 3704 return RepOp; 3705 3706 // We cannot replace a constant, and shouldn't even try. 3707 if (isa<Constant>(Op)) 3708 return nullptr; 3709 3710 auto *I = dyn_cast<Instruction>(V); 3711 if (!I) 3712 return nullptr; 3713 3714 // If this is a binary operator, try to simplify it with the replaced op. 3715 if (auto *B = dyn_cast<BinaryOperator>(I)) { 3716 // Consider: 3717 // %cmp = icmp eq i32 %x, 2147483647 3718 // %add = add nsw i32 %x, 1 3719 // %sel = select i1 %cmp, i32 -2147483648, i32 %add 3720 // 3721 // We can't replace %sel with %add unless we strip away the flags. 3722 // TODO: This is an unusual limitation because better analysis results in 3723 // worse simplification. InstCombine can do this fold more generally 3724 // by dropping the flags. Remove this fold to save compile-time? 3725 if (isa<OverflowingBinaryOperator>(B)) 3726 if (Q.IIQ.hasNoSignedWrap(B) || Q.IIQ.hasNoUnsignedWrap(B)) 3727 return nullptr; 3728 if (isa<PossiblyExactOperator>(B) && Q.IIQ.isExact(B)) 3729 return nullptr; 3730 3731 if (MaxRecurse) { 3732 if (B->getOperand(0) == Op) 3733 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q, 3734 MaxRecurse - 1); 3735 if (B->getOperand(1) == Op) 3736 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q, 3737 MaxRecurse - 1); 3738 } 3739 } 3740 3741 // Same for CmpInsts. 3742 if (CmpInst *C = dyn_cast<CmpInst>(I)) { 3743 if (MaxRecurse) { 3744 if (C->getOperand(0) == Op) 3745 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q, 3746 MaxRecurse - 1); 3747 if (C->getOperand(1) == Op) 3748 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q, 3749 MaxRecurse - 1); 3750 } 3751 } 3752 3753 // Same for GEPs. 3754 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 3755 if (MaxRecurse) { 3756 SmallVector<Value *, 8> NewOps(GEP->getNumOperands()); 3757 transform(GEP->operands(), NewOps.begin(), 3758 [&](Value *V) { return V == Op ? RepOp : V; }); 3759 return SimplifyGEPInst(GEP->getSourceElementType(), NewOps, Q, 3760 MaxRecurse - 1); 3761 } 3762 } 3763 3764 // TODO: We could hand off more cases to instsimplify here. 3765 3766 // If all operands are constant after substituting Op for RepOp then we can 3767 // constant fold the instruction. 3768 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) { 3769 // Build a list of all constant operands. 3770 SmallVector<Constant *, 8> ConstOps; 3771 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 3772 if (I->getOperand(i) == Op) 3773 ConstOps.push_back(CRepOp); 3774 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i))) 3775 ConstOps.push_back(COp); 3776 else 3777 break; 3778 } 3779 3780 // All operands were constants, fold it. 
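    // For example (illustrative): replacing %x with i32 300 in
    // "trunc i32 %x to i8" leaves only constant operands, and the trunc
    // constant-folds to i8 44.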
3781 if (ConstOps.size() == I->getNumOperands()) { 3782 if (CmpInst *C = dyn_cast<CmpInst>(I)) 3783 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0], 3784 ConstOps[1], Q.DL, Q.TLI); 3785 3786 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 3787 if (!LI->isVolatile()) 3788 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL); 3789 3790 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI); 3791 } 3792 } 3793 3794 return nullptr; 3795 } 3796 3797 /// Try to simplify a select instruction when its condition operand is an 3798 /// integer comparison where one operand of the compare is a constant. 3799 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, 3800 const APInt *Y, bool TrueWhenUnset) { 3801 const APInt *C; 3802 3803 // (X & Y) == 0 ? X & ~Y : X --> X 3804 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y 3805 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) && 3806 *Y == ~*C) 3807 return TrueWhenUnset ? FalseVal : TrueVal; 3808 3809 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y 3810 // (X & Y) != 0 ? X : X & ~Y --> X 3811 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) && 3812 *Y == ~*C) 3813 return TrueWhenUnset ? FalseVal : TrueVal; 3814 3815 if (Y->isPowerOf2()) { 3816 // (X & Y) == 0 ? X | Y : X --> X | Y 3817 // (X & Y) != 0 ? X | Y : X --> X 3818 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) && 3819 *Y == *C) 3820 return TrueWhenUnset ? TrueVal : FalseVal; 3821 3822 // (X & Y) == 0 ? X : X | Y --> X 3823 // (X & Y) != 0 ? X : X | Y --> X | Y 3824 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) && 3825 *Y == *C) 3826 return TrueWhenUnset ? TrueVal : FalseVal; 3827 } 3828 3829 return nullptr; 3830 } 3831 3832 /// An alternative way to test if a bit is set or not uses sgt/slt instead of 3833 /// eq/ne. 3834 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, 3835 ICmpInst::Predicate Pred, 3836 Value *TrueVal, Value *FalseVal) { 3837 Value *X; 3838 APInt Mask; 3839 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask)) 3840 return nullptr; 3841 3842 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask, 3843 Pred == ICmpInst::ICMP_EQ); 3844 } 3845 3846 /// Try to simplify a select instruction when its condition operand is an 3847 /// integer comparison. 3848 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, 3849 Value *FalseVal, const SimplifyQuery &Q, 3850 unsigned MaxRecurse) { 3851 ICmpInst::Predicate Pred; 3852 Value *CmpLHS, *CmpRHS; 3853 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)))) 3854 return nullptr; 3855 3856 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) { 3857 Value *X; 3858 const APInt *Y; 3859 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y)))) 3860 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y, 3861 Pred == ICmpInst::ICMP_EQ)) 3862 return V; 3863 3864 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate. 3865 Value *ShAmt; 3866 auto isFsh = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), 3867 m_Value(ShAmt)), 3868 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), 3869 m_Value(ShAmt))); 3870 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X 3871 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X 3872 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt && 3873 Pred == ICmpInst::ICMP_EQ) 3874 return X; 3875 // (ShAmt != 0) ? X : fshl(X, *, ShAmt) --> X 3876 // (ShAmt != 0) ? 
X : fshr(*, X, ShAmt) --> X 3877 if (match(FalseVal, isFsh) && TrueVal == X && CmpLHS == ShAmt && 3878 Pred == ICmpInst::ICMP_NE) 3879 return X; 3880 3881 // Test for a zero-shift-guard-op around rotates. These are used to 3882 // avoid UB from oversized shifts in raw IR rotate patterns, but the 3883 // intrinsics do not have that problem. 3884 // We do not allow this transform for the general funnel shift case because 3885 // that would not preserve the poison safety of the original code. 3886 auto isRotate = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), 3887 m_Deferred(X), 3888 m_Value(ShAmt)), 3889 m_Intrinsic<Intrinsic::fshr>(m_Value(X), 3890 m_Deferred(X), 3891 m_Value(ShAmt))); 3892 // (ShAmt != 0) ? fshl(X, X, ShAmt) : X --> fshl(X, X, ShAmt) 3893 // (ShAmt != 0) ? fshr(X, X, ShAmt) : X --> fshr(X, X, ShAmt) 3894 if (match(TrueVal, isRotate) && FalseVal == X && CmpLHS == ShAmt && 3895 Pred == ICmpInst::ICMP_NE) 3896 return TrueVal; 3897 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt) 3898 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt) 3899 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt && 3900 Pred == ICmpInst::ICMP_EQ) 3901 return FalseVal; 3902 } 3903 3904 // Check for other compares that behave like bit test. 3905 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, 3906 TrueVal, FalseVal)) 3907 return V; 3908 3909 // If we have an equality comparison, then we know the value in one of the 3910 // arms of the select. See if substituting this value into the arm and 3911 // simplifying the result yields the same value as the other arm. 3912 if (Pred == ICmpInst::ICMP_EQ) { 3913 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3914 TrueVal || 3915 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3916 TrueVal) 3917 return FalseVal; 3918 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3919 FalseVal || 3920 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3921 FalseVal) 3922 return FalseVal; 3923 } else if (Pred == ICmpInst::ICMP_NE) { 3924 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3925 FalseVal || 3926 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3927 FalseVal) 3928 return TrueVal; 3929 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3930 TrueVal || 3931 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3932 TrueVal) 3933 return TrueVal; 3934 } 3935 3936 return nullptr; 3937 } 3938 3939 /// Try to simplify a select instruction when its condition operand is a 3940 /// floating-point comparison. 3941 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, 3942 const SimplifyQuery &Q) { 3943 FCmpInst::Predicate Pred; 3944 if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) && 3945 !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T)))) 3946 return nullptr; 3947 3948 // This transform is safe if we do not have (do not care about) -0.0 or if 3949 // at least one operand is known to not be -0.0. Otherwise, the select can 3950 // change the sign of a zero operand. 3951 bool HasNoSignedZeros = Q.CxtI && isa<FPMathOperator>(Q.CxtI) && 3952 Q.CxtI->hasNoSignedZeros(); 3953 const APFloat *C; 3954 if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) || 3955 (match(F, m_APFloat(C)) && C->isNonZero())) { 3956 // (T == F) ? T : F --> F 3957 // (F == T) ? 
T : F --> F 3958 if (Pred == FCmpInst::FCMP_OEQ) 3959 return F; 3960 3961 // (T != F) ? T : F --> T 3962 // (F != T) ? T : F --> T 3963 if (Pred == FCmpInst::FCMP_UNE) 3964 return T; 3965 } 3966 3967 return nullptr; 3968 } 3969 3970 /// Given operands for a SelectInst, see if we can fold the result. 3971 /// If not, this returns null. 3972 static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, 3973 const SimplifyQuery &Q, unsigned MaxRecurse) { 3974 if (auto *CondC = dyn_cast<Constant>(Cond)) { 3975 if (auto *TrueC = dyn_cast<Constant>(TrueVal)) 3976 if (auto *FalseC = dyn_cast<Constant>(FalseVal)) 3977 return ConstantFoldSelectInstruction(CondC, TrueC, FalseC); 3978 3979 // select undef, X, Y -> X or Y 3980 if (isa<UndefValue>(CondC)) 3981 return isa<Constant>(FalseVal) ? FalseVal : TrueVal; 3982 3983 // TODO: Vector constants with undef elements don't simplify. 3984 3985 // select true, X, Y -> X 3986 if (CondC->isAllOnesValue()) 3987 return TrueVal; 3988 // select false, X, Y -> Y 3989 if (CondC->isNullValue()) 3990 return FalseVal; 3991 } 3992 3993 // select i1 Cond, i1 true, i1 false --> i1 Cond 3994 assert(Cond->getType()->isIntOrIntVectorTy(1) && 3995 "Select must have bool or bool vector condition"); 3996 assert(TrueVal->getType() == FalseVal->getType() && 3997 "Select must have same types for true/false ops"); 3998 if (Cond->getType() == TrueVal->getType() && 3999 match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt())) 4000 return Cond; 4001 4002 // select ?, X, X -> X 4003 if (TrueVal == FalseVal) 4004 return TrueVal; 4005 4006 if (isa<UndefValue>(TrueVal)) // select ?, undef, X -> X 4007 return FalseVal; 4008 if (isa<UndefValue>(FalseVal)) // select ?, X, undef -> X 4009 return TrueVal; 4010 4011 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC'' 4012 Constant *TrueC, *FalseC; 4013 if (TrueVal->getType()->isVectorTy() && match(TrueVal, m_Constant(TrueC)) && 4014 match(FalseVal, m_Constant(FalseC))) { 4015 unsigned NumElts = TrueC->getType()->getVectorNumElements(); 4016 SmallVector<Constant *, 16> NewC; 4017 for (unsigned i = 0; i != NumElts; ++i) { 4018 // Bail out on incomplete vector constants. 4019 Constant *TEltC = TrueC->getAggregateElement(i); 4020 Constant *FEltC = FalseC->getAggregateElement(i); 4021 if (!TEltC || !FEltC) 4022 break; 4023 4024 // If the elements match (undef or not), that value is the result. If only 4025 // one element is undef, choose the defined element as the safe result. 4026 if (TEltC == FEltC) 4027 NewC.push_back(TEltC); 4028 else if (isa<UndefValue>(TEltC)) 4029 NewC.push_back(FEltC); 4030 else if (isa<UndefValue>(FEltC)) 4031 NewC.push_back(TEltC); 4032 else 4033 break; 4034 } 4035 if (NewC.size() == NumElts) 4036 return ConstantVector::get(NewC); 4037 } 4038 4039 if (Value *V = 4040 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse)) 4041 return V; 4042 4043 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q)) 4044 return V; 4045 4046 if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal)) 4047 return V; 4048 4049 Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL); 4050 if (Imp) 4051 return *Imp ? TrueVal : FalseVal; 4052 4053 return nullptr; 4054 } 4055 4056 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, 4057 const SimplifyQuery &Q) { 4058 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit); 4059 } 4060 4061 /// Given operands for an GetElementPtrInst, see if we can fold the result. 
4062 /// If not, this returns null. 4063 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 4064 const SimplifyQuery &Q, unsigned) { 4065 // The type of the GEP pointer operand. 4066 unsigned AS = 4067 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace(); 4068 4069 // getelementptr P -> P. 4070 if (Ops.size() == 1) 4071 return Ops[0]; 4072 4073 // Compute the (pointer) type returned by the GEP instruction. 4074 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1)); 4075 Type *GEPTy = PointerType::get(LastType, AS); 4076 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType())) 4077 GEPTy = VectorType::get(GEPTy, VT->getNumElements()); 4078 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType())) 4079 GEPTy = VectorType::get(GEPTy, VT->getNumElements()); 4080 4081 if (isa<UndefValue>(Ops[0])) 4082 return UndefValue::get(GEPTy); 4083 4084 if (Ops.size() == 2) { 4085 // getelementptr P, 0 -> P. 4086 if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy) 4087 return Ops[0]; 4088 4089 Type *Ty = SrcTy; 4090 if (Ty->isSized()) { 4091 Value *P; 4092 uint64_t C; 4093 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty); 4094 // getelementptr P, N -> P if P points to a type of zero size. 4095 if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy) 4096 return Ops[0]; 4097 4098 // The following transforms are only safe if the ptrtoint cast 4099 // doesn't truncate the pointers. 4100 if (Ops[1]->getType()->getScalarSizeInBits() == 4101 Q.DL.getPointerSizeInBits(AS)) { 4102 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * { 4103 if (match(P, m_Zero())) 4104 return Constant::getNullValue(GEPTy); 4105 Value *Temp; 4106 if (match(P, m_PtrToInt(m_Value(Temp)))) 4107 if (Temp->getType() == GEPTy) 4108 return Temp; 4109 return nullptr; 4110 }; 4111 4112 // getelementptr V, (sub P, V) -> P if P points to a type of size 1. 4113 if (TyAllocSize == 1 && 4114 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))))) 4115 if (Value *R = PtrToIntOrZero(P)) 4116 return R; 4117 4118 // getelementptr V, (ashr (sub P, V), C) -> Q 4119 // if P points to a type of size 1 << C. 4120 if (match(Ops[1], 4121 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 4122 m_ConstantInt(C))) && 4123 TyAllocSize == 1ULL << C) 4124 if (Value *R = PtrToIntOrZero(P)) 4125 return R; 4126 4127 // getelementptr V, (sdiv (sub P, V), C) -> Q 4128 // if P points to a type of size C. 
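        // For example (illustrative, assuming i32 has an alloc size of 4 and
        // 64-bit pointers in this address space):
        //   %pi = ptrtoint i32* %p to i64
        //   %vi = ptrtoint i32* %v to i64
        //   %d  = sub i64 %pi, %vi
        //   %o  = sdiv i64 %d, 4
        //   getelementptr i32, i32* %v, i64 %o   ; --> %p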
4129 if (match(Ops[1], 4130 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 4131 m_SpecificInt(TyAllocSize)))) 4132 if (Value *R = PtrToIntOrZero(P)) 4133 return R; 4134 } 4135 } 4136 } 4137 4138 if (Q.DL.getTypeAllocSize(LastType) == 1 && 4139 all_of(Ops.slice(1).drop_back(1), 4140 [](Value *Idx) { return match(Idx, m_Zero()); })) { 4141 unsigned IdxWidth = 4142 Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace()); 4143 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) { 4144 APInt BasePtrOffset(IdxWidth, 0); 4145 Value *StrippedBasePtr = 4146 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL, 4147 BasePtrOffset); 4148 4149 // gep (gep V, C), (sub 0, V) -> C 4150 if (match(Ops.back(), 4151 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) { 4152 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset); 4153 return ConstantExpr::getIntToPtr(CI, GEPTy); 4154 } 4155 // gep (gep V, C), (xor V, -1) -> C-1 4156 if (match(Ops.back(), 4157 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) { 4158 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1); 4159 return ConstantExpr::getIntToPtr(CI, GEPTy); 4160 } 4161 } 4162 } 4163 4164 // Check to see if this is constant foldable. 4165 if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); })) 4166 return nullptr; 4167 4168 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]), 4169 Ops.slice(1)); 4170 if (auto *CEFolded = ConstantFoldConstant(CE, Q.DL)) 4171 return CEFolded; 4172 return CE; 4173 } 4174 4175 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 4176 const SimplifyQuery &Q) { 4177 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit); 4178 } 4179 4180 /// Given operands for an InsertValueInst, see if we can fold the result. 4181 /// If not, this returns null. 4182 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val, 4183 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q, 4184 unsigned) { 4185 if (Constant *CAgg = dyn_cast<Constant>(Agg)) 4186 if (Constant *CVal = dyn_cast<Constant>(Val)) 4187 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs); 4188 4189 // insertvalue x, undef, n -> x 4190 if (match(Val, m_Undef())) 4191 return Agg; 4192 4193 // insertvalue x, (extractvalue y, n), n 4194 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val)) 4195 if (EV->getAggregateOperand()->getType() == Agg->getType() && 4196 EV->getIndices() == Idxs) { 4197 // insertvalue undef, (extractvalue y, n), n -> y 4198 if (match(Agg, m_Undef())) 4199 return EV->getAggregateOperand(); 4200 4201 // insertvalue y, (extractvalue y, n), n -> y 4202 if (Agg == EV->getAggregateOperand()) 4203 return Agg; 4204 } 4205 4206 return nullptr; 4207 } 4208 4209 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val, 4210 ArrayRef<unsigned> Idxs, 4211 const SimplifyQuery &Q) { 4212 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit); 4213 } 4214 4215 Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx, 4216 const SimplifyQuery &Q) { 4217 // Try to constant fold. 4218 auto *VecC = dyn_cast<Constant>(Vec); 4219 auto *ValC = dyn_cast<Constant>(Val); 4220 auto *IdxC = dyn_cast<Constant>(Idx); 4221 if (VecC && ValC && IdxC) 4222 return ConstantFoldInsertElementInstruction(VecC, ValC, IdxC); 4223 4224 // Fold into undef if index is out of bounds. 
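  // For example (illustrative):
  //   insertelement <4 x float> %v, float %f, i32 7   ; --> undef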
4225 if (auto *CI = dyn_cast<ConstantInt>(Idx)) { 4226 uint64_t NumElements = cast<VectorType>(Vec->getType())->getNumElements(); 4227 if (CI->uge(NumElements)) 4228 return UndefValue::get(Vec->getType()); 4229 } 4230 4231 // If index is undef, it might be out of bounds (see above case) 4232 if (isa<UndefValue>(Idx)) 4233 return UndefValue::get(Vec->getType()); 4234 4235 // Inserting an undef scalar? Assume it is the same value as the existing 4236 // vector element. 4237 if (isa<UndefValue>(Val)) 4238 return Vec; 4239 4240 // If we are extracting a value from a vector, then inserting it into the same 4241 // place, that's the input vector: 4242 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec 4243 if (match(Val, m_ExtractElement(m_Specific(Vec), m_Specific(Idx)))) 4244 return Vec; 4245 4246 return nullptr; 4247 } 4248 4249 /// Given operands for an ExtractValueInst, see if we can fold the result. 4250 /// If not, this returns null. 4251 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 4252 const SimplifyQuery &, unsigned) { 4253 if (auto *CAgg = dyn_cast<Constant>(Agg)) 4254 return ConstantFoldExtractValueInstruction(CAgg, Idxs); 4255 4256 // extractvalue x, (insertvalue y, elt, n), n -> elt 4257 unsigned NumIdxs = Idxs.size(); 4258 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr; 4259 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) { 4260 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices(); 4261 unsigned NumInsertValueIdxs = InsertValueIdxs.size(); 4262 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs); 4263 if (InsertValueIdxs.slice(0, NumCommonIdxs) == 4264 Idxs.slice(0, NumCommonIdxs)) { 4265 if (NumIdxs == NumInsertValueIdxs) 4266 return IVI->getInsertedValueOperand(); 4267 break; 4268 } 4269 } 4270 4271 return nullptr; 4272 } 4273 4274 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 4275 const SimplifyQuery &Q) { 4276 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit); 4277 } 4278 4279 /// Given operands for an ExtractElementInst, see if we can fold the result. 4280 /// If not, this returns null. 4281 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &, 4282 unsigned) { 4283 if (auto *CVec = dyn_cast<Constant>(Vec)) { 4284 if (auto *CIdx = dyn_cast<Constant>(Idx)) 4285 return ConstantFoldExtractElementInstruction(CVec, CIdx); 4286 4287 // The index is not relevant if our vector is a splat. 4288 if (auto *Splat = CVec->getSplatValue()) 4289 return Splat; 4290 4291 if (isa<UndefValue>(Vec)) 4292 return UndefValue::get(Vec->getType()->getVectorElementType()); 4293 } 4294 4295 // If extracting a specified index from the vector, see if we can recursively 4296 // find a previously computed scalar that was inserted into the vector. 4297 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) { 4298 if (IdxC->getValue().uge(Vec->getType()->getVectorNumElements())) 4299 // definitely out of bounds, thus undefined result 4300 return UndefValue::get(Vec->getType()->getVectorElementType()); 4301 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue())) 4302 return Elt; 4303 } 4304 4305 // An undef extract index can be arbitrarily chosen to be an out-of-range 4306 // index value, which would result in the instruction being undef. 
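  // For example (illustrative):
  //   extractelement <4 x i32> %v, i32 undef   ; --> undef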
4307 if (isa<UndefValue>(Idx)) 4308 return UndefValue::get(Vec->getType()->getVectorElementType()); 4309 4310 return nullptr; 4311 } 4312 4313 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx, 4314 const SimplifyQuery &Q) { 4315 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit); 4316 } 4317 4318 /// See if we can fold the given phi. If not, returns null. 4319 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) { 4320 // If all of the PHI's incoming values are the same then replace the PHI node 4321 // with the common value. 4322 Value *CommonValue = nullptr; 4323 bool HasUndefInput = false; 4324 for (Value *Incoming : PN->incoming_values()) { 4325 // If the incoming value is the phi node itself, it can safely be skipped. 4326 if (Incoming == PN) continue; 4327 if (isa<UndefValue>(Incoming)) { 4328 // Remember that we saw an undef value, but otherwise ignore them. 4329 HasUndefInput = true; 4330 continue; 4331 } 4332 if (CommonValue && Incoming != CommonValue) 4333 return nullptr; // Not the same, bail out. 4334 CommonValue = Incoming; 4335 } 4336 4337 // If CommonValue is null then all of the incoming values were either undef or 4338 // equal to the phi node itself. 4339 if (!CommonValue) 4340 return UndefValue::get(PN->getType()); 4341 4342 // If we have a PHI node like phi(X, undef, X), where X is defined by some 4343 // instruction, we cannot return X as the result of the PHI node unless it 4344 // dominates the PHI block. 4345 if (HasUndefInput) 4346 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr; 4347 4348 return CommonValue; 4349 } 4350 4351 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op, 4352 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) { 4353 if (auto *C = dyn_cast<Constant>(Op)) 4354 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL); 4355 4356 if (auto *CI = dyn_cast<CastInst>(Op)) { 4357 auto *Src = CI->getOperand(0); 4358 Type *SrcTy = Src->getType(); 4359 Type *MidTy = CI->getType(); 4360 Type *DstTy = Ty; 4361 if (Src->getType() == Ty) { 4362 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode()); 4363 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc); 4364 Type *SrcIntPtrTy = 4365 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr; 4366 Type *MidIntPtrTy = 4367 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr; 4368 Type *DstIntPtrTy = 4369 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr; 4370 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy, 4371 SrcIntPtrTy, MidIntPtrTy, 4372 DstIntPtrTy) == Instruction::BitCast) 4373 return Src; 4374 } 4375 } 4376 4377 // bitcast x -> x 4378 if (CastOpc == Instruction::BitCast) 4379 if (Op->getType() == Ty) 4380 return Op; 4381 4382 return nullptr; 4383 } 4384 4385 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, 4386 const SimplifyQuery &Q) { 4387 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit); 4388 } 4389 4390 /// For the given destination element of a shuffle, peek through shuffles to 4391 /// match a root vector source operand that contains that element in the same 4392 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s). 4393 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, 4394 int MaskVal, Value *RootVec, 4395 unsigned MaxRecurse) { 4396 if (!MaxRecurse--) 4397 return nullptr; 4398 4399 // Bail out if any mask value is undefined. 
That kind of shuffle may be 4400 // simplified further based on demanded bits or other folds. 4401 if (MaskVal == -1) 4402 return nullptr; 4403 4404 // The mask value chooses which source operand we need to look at next. 4405 int InVecNumElts = Op0->getType()->getVectorNumElements(); 4406 int RootElt = MaskVal; 4407 Value *SourceOp = Op0; 4408 if (MaskVal >= InVecNumElts) { 4409 RootElt = MaskVal - InVecNumElts; 4410 SourceOp = Op1; 4411 } 4412 4413 // If the source operand is a shuffle itself, look through it to find the 4414 // matching root vector. 4415 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) { 4416 return foldIdentityShuffles( 4417 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1), 4418 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse); 4419 } 4420 4421 // TODO: Look through bitcasts? What if the bitcast changes the vector element 4422 // size? 4423 4424 // The source operand is not a shuffle. Initialize the root vector value for 4425 // this shuffle if that has not been done yet. 4426 if (!RootVec) 4427 RootVec = SourceOp; 4428 4429 // Give up as soon as a source operand does not match the existing root value. 4430 if (RootVec != SourceOp) 4431 return nullptr; 4432 4433 // The element must be coming from the same lane in the source vector 4434 // (although it may have crossed lanes in intermediate shuffles). 4435 if (RootElt != DestElt) 4436 return nullptr; 4437 4438 return RootVec; 4439 } 4440 4441 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, 4442 Type *RetTy, const SimplifyQuery &Q, 4443 unsigned MaxRecurse) { 4444 if (isa<UndefValue>(Mask)) 4445 return UndefValue::get(RetTy); 4446 4447 Type *InVecTy = Op0->getType(); 4448 unsigned MaskNumElts = Mask->getType()->getVectorNumElements(); 4449 unsigned InVecNumElts = InVecTy->getVectorNumElements(); 4450 4451 SmallVector<int, 32> Indices; 4452 ShuffleVectorInst::getShuffleMask(Mask, Indices); 4453 assert(MaskNumElts == Indices.size() && 4454 "Size of Indices not same as number of mask elements?"); 4455 4456 // Canonicalization: If mask does not select elements from an input vector, 4457 // replace that input vector with undef. 4458 bool MaskSelects0 = false, MaskSelects1 = false; 4459 for (unsigned i = 0; i != MaskNumElts; ++i) { 4460 if (Indices[i] == -1) 4461 continue; 4462 if ((unsigned)Indices[i] < InVecNumElts) 4463 MaskSelects0 = true; 4464 else 4465 MaskSelects1 = true; 4466 } 4467 if (!MaskSelects0) 4468 Op0 = UndefValue::get(InVecTy); 4469 if (!MaskSelects1) 4470 Op1 = UndefValue::get(InVecTy); 4471 4472 auto *Op0Const = dyn_cast<Constant>(Op0); 4473 auto *Op1Const = dyn_cast<Constant>(Op1); 4474 4475 // If all operands are constant, constant fold the shuffle. 4476 if (Op0Const && Op1Const) 4477 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask); 4478 4479 // Canonicalization: if only one input vector is constant, it shall be the 4480 // second one. 4481 if (Op0Const && !Op1Const) { 4482 std::swap(Op0, Op1); 4483 ShuffleVectorInst::commuteShuffleMask(Indices, InVecNumElts); 4484 } 4485 4486 // A splat of an inserted scalar constant becomes a vector constant: 4487 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...> 4488 // NOTE: We may have commuted above, so analyze the updated Indices, not the 4489 // original mask constant. 
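  // For example (illustrative):
  //   %ins = insertelement <4 x i32> %v, i32 7, i32 1
  //   shufflevector <4 x i32> %ins, <4 x i32> undef,
  //                 <4 x i32> <i32 1, i32 undef, i32 1, i32 1>
  //   ; --> <i32 7, i32 undef, i32 7, i32 7>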
4490 Constant *C; 4491 ConstantInt *IndexC; 4492 if (match(Op0, m_InsertElement(m_Value(), m_Constant(C), 4493 m_ConstantInt(IndexC)))) { 4494 // Match a splat shuffle mask of the insert index allowing undef elements. 4495 int InsertIndex = IndexC->getZExtValue(); 4496 if (all_of(Indices, [InsertIndex](int MaskElt) { 4497 return MaskElt == InsertIndex || MaskElt == -1; 4498 })) { 4499 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat"); 4500 4501 // Shuffle mask undefs become undefined constant result elements. 4502 SmallVector<Constant *, 16> VecC(MaskNumElts, C); 4503 for (unsigned i = 0; i != MaskNumElts; ++i) 4504 if (Indices[i] == -1) 4505 VecC[i] = UndefValue::get(C->getType()); 4506 return ConstantVector::get(VecC); 4507 } 4508 } 4509 4510 // A shuffle of a splat is always the splat itself. Legal if the shuffle's 4511 // value type is same as the input vectors' type. 4512 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0)) 4513 if (isa<UndefValue>(Op1) && RetTy == InVecTy && 4514 OpShuf->getMask()->getSplatValue()) 4515 return Op0; 4516 4517 // Don't fold a shuffle with undef mask elements. This may get folded in a 4518 // better way using demanded bits or other analysis. 4519 // TODO: Should we allow this? 4520 if (find(Indices, -1) != Indices.end()) 4521 return nullptr; 4522 4523 // Check if every element of this shuffle can be mapped back to the 4524 // corresponding element of a single root vector. If so, we don't need this 4525 // shuffle. This handles simple identity shuffles as well as chains of 4526 // shuffles that may widen/narrow and/or move elements across lanes and back. 4527 Value *RootVec = nullptr; 4528 for (unsigned i = 0; i != MaskNumElts; ++i) { 4529 // Note that recursion is limited for each vector element, so if any element 4530 // exceeds the limit, this will fail to simplify. 4531 RootVec = 4532 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse); 4533 4534 // We can't replace a widening/narrowing shuffle with one of its operands. 4535 if (!RootVec || RootVec->getType() != RetTy) 4536 return nullptr; 4537 } 4538 return RootVec; 4539 } 4540 4541 /// Given operands for a ShuffleVectorInst, fold the result or return null. 4542 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, 4543 Type *RetTy, const SimplifyQuery &Q) { 4544 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit); 4545 } 4546 4547 static Constant *foldConstant(Instruction::UnaryOps Opcode, 4548 Value *&Op, const SimplifyQuery &Q) { 4549 if (auto *C = dyn_cast<Constant>(Op)) 4550 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL); 4551 return nullptr; 4552 } 4553 4554 /// Given the operand for an FNeg, see if we can fold the result. If not, this 4555 /// returns null. 4556 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, 4557 const SimplifyQuery &Q, unsigned MaxRecurse) { 4558 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q)) 4559 return C; 4560 4561 Value *X; 4562 // fneg (fneg X) ==> X 4563 if (match(Op, m_FNeg(m_Value(X)))) 4564 return X; 4565 4566 return nullptr; 4567 } 4568 4569 Value *llvm::SimplifyFNegInst(Value *Op, FastMathFlags FMF, 4570 const SimplifyQuery &Q) { 4571 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit); 4572 } 4573 4574 static Constant *propagateNaN(Constant *In) { 4575 // If the input is a vector with undef elements, just return a default NaN. 
4576 if (!In->isNaN()) 4577 return ConstantFP::getNaN(In->getType()); 4578 4579 // Propagate the existing NaN constant when possible. 4580 // TODO: Should we quiet a signaling NaN? 4581 return In; 4582 } 4583 4584 /// Perform folds that are common to any floating-point operation. This implies 4585 /// transforms based on undef/NaN because the operation itself makes no 4586 /// difference to the result. 4587 static Constant *simplifyFPOp(ArrayRef<Value *> Ops) { 4588 if (any_of(Ops, [](Value *V) { return isa<UndefValue>(V); })) 4589 return ConstantFP::getNaN(Ops[0]->getType()); 4590 4591 for (Value *V : Ops) 4592 if (match(V, m_NaN())) 4593 return propagateNaN(cast<Constant>(V)); 4594 4595 return nullptr; 4596 } 4597 4598 /// Given operands for an FAdd, see if we can fold the result. If not, this 4599 /// returns null. 4600 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4601 const SimplifyQuery &Q, unsigned MaxRecurse) { 4602 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q)) 4603 return C; 4604 4605 if (Constant *C = simplifyFPOp({Op0, Op1})) 4606 return C; 4607 4608 // fadd X, -0 ==> X 4609 if (match(Op1, m_NegZeroFP())) 4610 return Op0; 4611 4612 // fadd X, 0 ==> X, when we know X is not -0 4613 if (match(Op1, m_PosZeroFP()) && 4614 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI))) 4615 return Op0; 4616 4617 // With nnan: -X + X --> 0.0 (and commuted variant) 4618 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN. 4619 // Negative zeros are allowed because we always end up with positive zero: 4620 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 4621 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 4622 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0 4623 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0 4624 if (FMF.noNaNs()) { 4625 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) || 4626 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0)))) 4627 return ConstantFP::getNullValue(Op0->getType()); 4628 4629 if (match(Op0, m_FNeg(m_Specific(Op1))) || 4630 match(Op1, m_FNeg(m_Specific(Op0)))) 4631 return ConstantFP::getNullValue(Op0->getType()); 4632 } 4633 4634 // (X - Y) + Y --> X 4635 // Y + (X - Y) --> X 4636 Value *X; 4637 if (FMF.noSignedZeros() && FMF.allowReassoc() && 4638 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) || 4639 match(Op1, m_FSub(m_Value(X), m_Specific(Op0))))) 4640 return X; 4641 4642 return nullptr; 4643 } 4644 4645 /// Given operands for an FSub, see if we can fold the result. If not, this 4646 /// returns null. 4647 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4648 const SimplifyQuery &Q, unsigned MaxRecurse) { 4649 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q)) 4650 return C; 4651 4652 if (Constant *C = simplifyFPOp({Op0, Op1})) 4653 return C; 4654 4655 // fsub X, +0 ==> X 4656 if (match(Op1, m_PosZeroFP())) 4657 return Op0; 4658 4659 // fsub X, -0 ==> X, when we know X is not -0 4660 if (match(Op1, m_NegZeroFP()) && 4661 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI))) 4662 return Op0; 4663 4664 // fsub -0.0, (fsub -0.0, X) ==> X 4665 // fsub -0.0, (fneg X) ==> X 4666 Value *X; 4667 if (match(Op0, m_NegZeroFP()) && 4668 match(Op1, m_FNeg(m_Value(X)))) 4669 return X; 4670 4671 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored. 4672 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored. 
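  // For example (illustrative IR):
  //   %neg = fsub float 0.0, %x
  //   %res = fsub nsz float 0.0, %neg
  //   ; %res simplifies to %x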
4673 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) && 4674 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) || 4675 match(Op1, m_FNeg(m_Value(X))))) 4676 return X; 4677 4678 // fsub nnan x, x ==> 0.0 4679 if (FMF.noNaNs() && Op0 == Op1) 4680 return Constant::getNullValue(Op0->getType()); 4681 4682 // Y - (Y - X) --> X 4683 // (X + Y) - Y --> X 4684 if (FMF.noSignedZeros() && FMF.allowReassoc() && 4685 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) || 4686 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X))))) 4687 return X; 4688 4689 return nullptr; 4690 } 4691 4692 static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, 4693 const SimplifyQuery &Q, unsigned MaxRecurse) { 4694 if (Constant *C = simplifyFPOp({Op0, Op1})) 4695 return C; 4696 4697 // fmul X, 1.0 ==> X 4698 if (match(Op1, m_FPOne())) 4699 return Op0; 4700 4701 // fmul 1.0, X ==> X 4702 if (match(Op0, m_FPOne())) 4703 return Op1; 4704 4705 // fmul nnan nsz X, 0 ==> 0 4706 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP())) 4707 return ConstantFP::getNullValue(Op0->getType()); 4708 4709 // fmul nnan nsz 0, X ==> 0 4710 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP())) 4711 return ConstantFP::getNullValue(Op1->getType()); 4712 4713 // sqrt(X) * sqrt(X) --> X, if we can: 4714 // 1. Remove the intermediate rounding (reassociate). 4715 // 2. Ignore non-zero negative numbers because sqrt would produce NAN. 4716 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0. 4717 Value *X; 4718 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && 4719 FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros()) 4720 return X; 4721 4722 return nullptr; 4723 } 4724 4725 /// Given the operands for an FMul, see if we can fold the result 4726 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4727 const SimplifyQuery &Q, unsigned MaxRecurse) { 4728 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q)) 4729 return C; 4730 4731 // Now apply simplifications that do not require rounding. 4732 return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse); 4733 } 4734 4735 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4736 const SimplifyQuery &Q) { 4737 return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit); 4738 } 4739 4740 4741 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4742 const SimplifyQuery &Q) { 4743 return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit); 4744 } 4745 4746 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4747 const SimplifyQuery &Q) { 4748 return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit); 4749 } 4750 4751 Value *llvm::SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, 4752 const SimplifyQuery &Q) { 4753 return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit); 4754 } 4755 4756 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4757 const SimplifyQuery &Q, unsigned) { 4758 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q)) 4759 return C; 4760 4761 if (Constant *C = simplifyFPOp({Op0, Op1})) 4762 return C; 4763 4764 // X / 1.0 -> X 4765 if (match(Op1, m_FPOne())) 4766 return Op0; 4767 4768 // 0 / X -> 0 4769 // Requires that NaNs are off (X could be zero) and signed zeroes are 4770 // ignored (X could be positive or negative, so the output sign is unknown). 
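  // For example (illustrative IR):
  //   %res = fdiv nnan nsz float 0.0, %x
  //   ; %res simplifies to 0.0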
4771 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP())) 4772 return ConstantFP::getNullValue(Op0->getType()); 4773 4774 if (FMF.noNaNs()) { 4775 // X / X -> 1.0 is legal when NaNs are ignored. 4776 // We can ignore infinities because INF/INF is NaN. 4777 if (Op0 == Op1) 4778 return ConstantFP::get(Op0->getType(), 1.0); 4779 4780 // (X * Y) / Y --> X if we can reassociate to the above form. 4781 Value *X; 4782 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1)))) 4783 return X; 4784 4785 // -X / X -> -1.0 and 4786 // X / -X -> -1.0 are legal when NaNs are ignored. 4787 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored. 4788 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) || 4789 match(Op1, m_FNegNSZ(m_Specific(Op0)))) 4790 return ConstantFP::get(Op0->getType(), -1.0); 4791 } 4792 4793 return nullptr; 4794 } 4795 4796 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4797 const SimplifyQuery &Q) { 4798 return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit); 4799 } 4800 4801 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4802 const SimplifyQuery &Q, unsigned) { 4803 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q)) 4804 return C; 4805 4806 if (Constant *C = simplifyFPOp({Op0, Op1})) 4807 return C; 4808 4809 // Unlike fdiv, the result of frem always matches the sign of the dividend. 4810 // The constant match may include undef elements in a vector, so return a full 4811 // zero constant as the result. 4812 if (FMF.noNaNs()) { 4813 // +0 % X -> 0 4814 if (match(Op0, m_PosZeroFP())) 4815 return ConstantFP::getNullValue(Op0->getType()); 4816 // -0 % X -> -0 4817 if (match(Op0, m_NegZeroFP())) 4818 return ConstantFP::getNegativeZero(Op0->getType()); 4819 } 4820 4821 return nullptr; 4822 } 4823 4824 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4825 const SimplifyQuery &Q) { 4826 return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit); 4827 } 4828 4829 //=== Helper functions for higher up the class hierarchy. 4830 4831 /// Given the operand for a UnaryOperator, see if we can fold the result. 4832 /// If not, this returns null. 4833 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q, 4834 unsigned MaxRecurse) { 4835 switch (Opcode) { 4836 case Instruction::FNeg: 4837 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse); 4838 default: 4839 llvm_unreachable("Unexpected opcode"); 4840 } 4841 } 4842 4843 /// Given the operand for a UnaryOperator, see if we can fold the result. 4844 /// If not, this returns null. 4845 /// Try to use FastMathFlags when folding the result. 4846 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op, 4847 const FastMathFlags &FMF, 4848 const SimplifyQuery &Q, unsigned MaxRecurse) { 4849 switch (Opcode) { 4850 case Instruction::FNeg: 4851 return simplifyFNegInst(Op, FMF, Q, MaxRecurse); 4852 default: 4853 return simplifyUnOp(Opcode, Op, Q, MaxRecurse); 4854 } 4855 } 4856 4857 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) { 4858 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit); 4859 } 4860 4861 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF, 4862 const SimplifyQuery &Q) { 4863 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit); 4864 } 4865 4866 /// Given operands for a BinaryOperator, see if we can fold the result. 4867 /// If not, this returns null. 
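/// The opcode is dispatched to the matching Simplify*Inst helper; FP opcodes
/// are handled here with default (empty) fast-math flags. For example
/// (illustrative), Instruction::Add is forwarded to SimplifyAddInst with the
/// nsw/nuw flags assumed to be absent.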
4868 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4869 const SimplifyQuery &Q, unsigned MaxRecurse) { 4870 switch (Opcode) { 4871 case Instruction::Add: 4872 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse); 4873 case Instruction::Sub: 4874 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse); 4875 case Instruction::Mul: 4876 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse); 4877 case Instruction::SDiv: 4878 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse); 4879 case Instruction::UDiv: 4880 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse); 4881 case Instruction::SRem: 4882 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse); 4883 case Instruction::URem: 4884 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse); 4885 case Instruction::Shl: 4886 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse); 4887 case Instruction::LShr: 4888 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse); 4889 case Instruction::AShr: 4890 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse); 4891 case Instruction::And: 4892 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse); 4893 case Instruction::Or: 4894 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse); 4895 case Instruction::Xor: 4896 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse); 4897 case Instruction::FAdd: 4898 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4899 case Instruction::FSub: 4900 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4901 case Instruction::FMul: 4902 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4903 case Instruction::FDiv: 4904 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4905 case Instruction::FRem: 4906 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4907 default: 4908 llvm_unreachable("Unexpected opcode"); 4909 } 4910 } 4911 4912 /// Given operands for a BinaryOperator, see if we can fold the result. 4913 /// If not, this returns null. 4914 /// Try to use FastMathFlags when folding the result. 4915 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4916 const FastMathFlags &FMF, const SimplifyQuery &Q, 4917 unsigned MaxRecurse) { 4918 switch (Opcode) { 4919 case Instruction::FAdd: 4920 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse); 4921 case Instruction::FSub: 4922 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse); 4923 case Instruction::FMul: 4924 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse); 4925 case Instruction::FDiv: 4926 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse); 4927 default: 4928 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse); 4929 } 4930 } 4931 4932 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4933 const SimplifyQuery &Q) { 4934 return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit); 4935 } 4936 4937 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4938 FastMathFlags FMF, const SimplifyQuery &Q) { 4939 return ::SimplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit); 4940 } 4941 4942 /// Given operands for a CmpInst, see if we can fold the result. 
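/// Integer predicates are forwarded to SimplifyICmpInst; floating-point
/// predicates go to SimplifyFCmpInst with default (empty) fast-math flags.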
4943 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4944 const SimplifyQuery &Q, unsigned MaxRecurse) { 4945 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate)) 4946 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse); 4947 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4948 } 4949 4950 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4951 const SimplifyQuery &Q) { 4952 return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit); 4953 } 4954 4955 static bool IsIdempotent(Intrinsic::ID ID) { 4956 switch (ID) { 4957 default: return false; 4958 4959 // Unary idempotent: f(f(x)) = f(x) 4960 case Intrinsic::fabs: 4961 case Intrinsic::floor: 4962 case Intrinsic::ceil: 4963 case Intrinsic::trunc: 4964 case Intrinsic::rint: 4965 case Intrinsic::nearbyint: 4966 case Intrinsic::round: 4967 case Intrinsic::canonicalize: 4968 return true; 4969 } 4970 } 4971 4972 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset, 4973 const DataLayout &DL) { 4974 GlobalValue *PtrSym; 4975 APInt PtrOffset; 4976 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL)) 4977 return nullptr; 4978 4979 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext()); 4980 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext()); 4981 Type *Int32PtrTy = Int32Ty->getPointerTo(); 4982 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext()); 4983 4984 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset); 4985 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64) 4986 return nullptr; 4987 4988 uint64_t OffsetInt = OffsetConstInt->getSExtValue(); 4989 if (OffsetInt % 4 != 0) 4990 return nullptr; 4991 4992 Constant *C = ConstantExpr::getGetElementPtr( 4993 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy), 4994 ConstantInt::get(Int64Ty, OffsetInt / 4)); 4995 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL); 4996 if (!Loaded) 4997 return nullptr; 4998 4999 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded); 5000 if (!LoadedCE) 5001 return nullptr; 5002 5003 if (LoadedCE->getOpcode() == Instruction::Trunc) { 5004 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 5005 if (!LoadedCE) 5006 return nullptr; 5007 } 5008 5009 if (LoadedCE->getOpcode() != Instruction::Sub) 5010 return nullptr; 5011 5012 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 5013 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt) 5014 return nullptr; 5015 auto *LoadedLHSPtr = LoadedLHS->getOperand(0); 5016 5017 Constant *LoadedRHS = LoadedCE->getOperand(1); 5018 GlobalValue *LoadedRHSSym; 5019 APInt LoadedRHSOffset; 5020 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset, 5021 DL) || 5022 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset) 5023 return nullptr; 5024 5025 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy); 5026 } 5027 5028 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0, 5029 const SimplifyQuery &Q) { 5030 // Idempotent functions return the same result when called repeatedly. 
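  // For example (illustrative IR):
  //   %a = call float @llvm.fabs.f32(float %x)
  //   %b = call float @llvm.fabs.f32(float %a)
  //   ; %b simplifies to %a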
5031 Intrinsic::ID IID = F->getIntrinsicID(); 5032 if (IsIdempotent(IID)) 5033 if (auto *II = dyn_cast<IntrinsicInst>(Op0)) 5034 if (II->getIntrinsicID() == IID) 5035 return II; 5036 5037 Value *X; 5038 switch (IID) { 5039 case Intrinsic::fabs: 5040 if (SignBitMustBeZero(Op0, Q.TLI)) return Op0; 5041 break; 5042 case Intrinsic::bswap: 5043 // bswap(bswap(x)) -> x 5044 if (match(Op0, m_BSwap(m_Value(X)))) return X; 5045 break; 5046 case Intrinsic::bitreverse: 5047 // bitreverse(bitreverse(x)) -> x 5048 if (match(Op0, m_BitReverse(m_Value(X)))) return X; 5049 break; 5050 case Intrinsic::exp: 5051 // exp(log(x)) -> x 5052 if (Q.CxtI->hasAllowReassoc() && 5053 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X)))) return X; 5054 break; 5055 case Intrinsic::exp2: 5056 // exp2(log2(x)) -> x 5057 if (Q.CxtI->hasAllowReassoc() && 5058 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X)))) return X; 5059 break; 5060 case Intrinsic::log: 5061 // log(exp(x)) -> x 5062 if (Q.CxtI->hasAllowReassoc() && 5063 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X)))) return X; 5064 break; 5065 case Intrinsic::log2: 5066 // log2(exp2(x)) -> x 5067 if (Q.CxtI->hasAllowReassoc() && 5068 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) || 5069 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), 5070 m_Value(X))))) return X; 5071 break; 5072 case Intrinsic::log10: 5073 // log10(pow(10.0, x)) -> x 5074 if (Q.CxtI->hasAllowReassoc() && 5075 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), 5076 m_Value(X)))) return X; 5077 break; 5078 case Intrinsic::floor: 5079 case Intrinsic::trunc: 5080 case Intrinsic::ceil: 5081 case Intrinsic::round: 5082 case Intrinsic::nearbyint: 5083 case Intrinsic::rint: { 5084 // floor (sitofp x) -> sitofp x 5085 // floor (uitofp x) -> uitofp x 5086 // 5087 // Converting from int always results in a finite integral number or 5088 // infinity. For either of those inputs, these rounding functions always 5089 // return the same value, so the rounding can be eliminated. 
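    // For example (illustrative IR):
    //   %f = sitofp i32 %x to float
    //   %r = call float @llvm.floor.f32(float %f)
    //   ; %r simplifies to %f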
5090 if (match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value()))) 5091 return Op0; 5092 break; 5093 } 5094 default: 5095 break; 5096 } 5097 5098 return nullptr; 5099 } 5100 5101 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1, 5102 const SimplifyQuery &Q) { 5103 Intrinsic::ID IID = F->getIntrinsicID(); 5104 Type *ReturnType = F->getReturnType(); 5105 switch (IID) { 5106 case Intrinsic::usub_with_overflow: 5107 case Intrinsic::ssub_with_overflow: 5108 // X - X -> { 0, false } 5109 if (Op0 == Op1) 5110 return Constant::getNullValue(ReturnType); 5111 LLVM_FALLTHROUGH; 5112 case Intrinsic::uadd_with_overflow: 5113 case Intrinsic::sadd_with_overflow: 5114 // X - undef -> { undef, false } 5115 // undef - X -> { undef, false } 5116 // X + undef -> { undef, false } 5117 // undef + x -> { undef, false } 5118 if (isa<UndefValue>(Op0) || isa<UndefValue>(Op1)) { 5119 return ConstantStruct::get( 5120 cast<StructType>(ReturnType), 5121 {UndefValue::get(ReturnType->getStructElementType(0)), 5122 Constant::getNullValue(ReturnType->getStructElementType(1))}); 5123 } 5124 break; 5125 case Intrinsic::umul_with_overflow: 5126 case Intrinsic::smul_with_overflow: 5127 // 0 * X -> { 0, false } 5128 // X * 0 -> { 0, false } 5129 if (match(Op0, m_Zero()) || match(Op1, m_Zero())) 5130 return Constant::getNullValue(ReturnType); 5131 // undef * X -> { 0, false } 5132 // X * undef -> { 0, false } 5133 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 5134 return Constant::getNullValue(ReturnType); 5135 break; 5136 case Intrinsic::uadd_sat: 5137 // sat(MAX + X) -> MAX 5138 // sat(X + MAX) -> MAX 5139 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes())) 5140 return Constant::getAllOnesValue(ReturnType); 5141 LLVM_FALLTHROUGH; 5142 case Intrinsic::sadd_sat: 5143 // sat(X + undef) -> -1 5144 // sat(undef + X) -> -1 5145 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1). 5146 // For signed: Assume undef is ~X, in which case X + ~X = -1. 
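    // For example (illustrative IR):
    //   %r = call i8 @llvm.sadd.sat.i8(i8 %x, i8 undef)
    //   ; %r simplifies to i8 -1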
5147 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 5148 return Constant::getAllOnesValue(ReturnType); 5149 5150 // X + 0 -> X 5151 if (match(Op1, m_Zero())) 5152 return Op0; 5153 // 0 + X -> X 5154 if (match(Op0, m_Zero())) 5155 return Op1; 5156 break; 5157 case Intrinsic::usub_sat: 5158 // sat(0 - X) -> 0, sat(X - MAX) -> 0 5159 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes())) 5160 return Constant::getNullValue(ReturnType); 5161 LLVM_FALLTHROUGH; 5162 case Intrinsic::ssub_sat: 5163 // X - X -> 0, X - undef -> 0, undef - X -> 0 5164 if (Op0 == Op1 || match(Op0, m_Undef()) || match(Op1, m_Undef())) 5165 return Constant::getNullValue(ReturnType); 5166 // X - 0 -> X 5167 if (match(Op1, m_Zero())) 5168 return Op0; 5169 break; 5170 case Intrinsic::load_relative: 5171 if (auto *C0 = dyn_cast<Constant>(Op0)) 5172 if (auto *C1 = dyn_cast<Constant>(Op1)) 5173 return SimplifyRelativeLoad(C0, C1, Q.DL); 5174 break; 5175 case Intrinsic::powi: 5176 if (auto *Power = dyn_cast<ConstantInt>(Op1)) { 5177 // powi(x, 0) -> 1.0 5178 if (Power->isZero()) 5179 return ConstantFP::get(Op0->getType(), 1.0); 5180 // powi(x, 1) -> x 5181 if (Power->isOne()) 5182 return Op0; 5183 } 5184 break; 5185 case Intrinsic::copysign: 5186 // copysign X, X --> X 5187 if (Op0 == Op1) 5188 return Op0; 5189 // copysign -X, X --> X 5190 // copysign X, -X --> -X 5191 if (match(Op0, m_FNeg(m_Specific(Op1))) || 5192 match(Op1, m_FNeg(m_Specific(Op0)))) 5193 return Op1; 5194 break; 5195 case Intrinsic::maxnum: 5196 case Intrinsic::minnum: 5197 case Intrinsic::maximum: 5198 case Intrinsic::minimum: { 5199 // If the arguments are the same, this is a no-op. 5200 if (Op0 == Op1) return Op0; 5201 5202 // If one argument is undef, return the other argument. 5203 if (match(Op0, m_Undef())) 5204 return Op1; 5205 if (match(Op1, m_Undef())) 5206 return Op0; 5207 5208 // If one argument is NaN, return other or NaN appropriately. 5209 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum; 5210 if (match(Op0, m_NaN())) 5211 return PropagateNaN ? Op0 : Op1; 5212 if (match(Op1, m_NaN())) 5213 return PropagateNaN ? Op1 : Op0; 5214 5215 // Min/max of the same operation with common operand: 5216 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants) 5217 if (auto *M0 = dyn_cast<IntrinsicInst>(Op0)) 5218 if (M0->getIntrinsicID() == IID && 5219 (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1)) 5220 return Op0; 5221 if (auto *M1 = dyn_cast<IntrinsicInst>(Op1)) 5222 if (M1->getIntrinsicID() == IID && 5223 (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0)) 5224 return Op1; 5225 5226 // min(X, -Inf) --> -Inf (and commuted variant) 5227 // max(X, +Inf) --> +Inf (and commuted variant) 5228 bool UseNegInf = IID == Intrinsic::minnum || IID == Intrinsic::minimum; 5229 const APFloat *C; 5230 if ((match(Op0, m_APFloat(C)) && C->isInfinity() && 5231 C->isNegative() == UseNegInf) || 5232 (match(Op1, m_APFloat(C)) && C->isInfinity() && 5233 C->isNegative() == UseNegInf)) 5234 return ConstantFP::getInfinity(ReturnType, UseNegInf); 5235 5236 // TODO: minnum(nnan x, inf) -> x 5237 // TODO: minnum(nnan ninf x, flt_max) -> x 5238 // TODO: maxnum(nnan x, -inf) -> x 5239 // TODO: maxnum(nnan ninf x, -flt_max) -> x 5240 break; 5241 } 5242 default: 5243 break; 5244 } 5245 5246 return nullptr; 5247 } 5248 5249 static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) { 5250 5251 // Intrinsics with no operands have some kind of side effect. Don't simplify. 
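  // (e.g. an intrinsic such as @llvm.sideeffect takes no arguments, so it is
  // left untouched here.)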
5252 unsigned NumOperands = Call->getNumArgOperands(); 5253 if (!NumOperands) 5254 return nullptr; 5255 5256 Function *F = cast<Function>(Call->getCalledFunction()); 5257 Intrinsic::ID IID = F->getIntrinsicID(); 5258 if (NumOperands == 1) 5259 return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q); 5260 5261 if (NumOperands == 2) 5262 return simplifyBinaryIntrinsic(F, Call->getArgOperand(0), 5263 Call->getArgOperand(1), Q); 5264 5265 // Handle intrinsics with 3 or more arguments. 5266 switch (IID) { 5267 case Intrinsic::masked_load: 5268 case Intrinsic::masked_gather: { 5269 Value *MaskArg = Call->getArgOperand(2); 5270 Value *PassthruArg = Call->getArgOperand(3); 5271 // If the mask is all zeros or undef, the "passthru" argument is the result. 5272 if (maskIsAllZeroOrUndef(MaskArg)) 5273 return PassthruArg; 5274 return nullptr; 5275 } 5276 case Intrinsic::fshl: 5277 case Intrinsic::fshr: { 5278 Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1), 5279 *ShAmtArg = Call->getArgOperand(2); 5280 5281 // If both operands are undef, the result is undef. 5282 if (match(Op0, m_Undef()) && match(Op1, m_Undef())) 5283 return UndefValue::get(F->getReturnType()); 5284 5285 // If shift amount is undef, assume it is zero. 5286 if (match(ShAmtArg, m_Undef())) 5287 return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1); 5288 5289 const APInt *ShAmtC; 5290 if (match(ShAmtArg, m_APInt(ShAmtC))) { 5291 // If there's effectively no shift, return the 1st arg or 2nd arg. 5292 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth()); 5293 if (ShAmtC->urem(BitWidth).isNullValue()) 5294 return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1); 5295 } 5296 return nullptr; 5297 } 5298 case Intrinsic::fma: 5299 case Intrinsic::fmuladd: { 5300 Value *Op0 = Call->getArgOperand(0); 5301 Value *Op1 = Call->getArgOperand(1); 5302 Value *Op2 = Call->getArgOperand(2); 5303 if (Value *V = simplifyFPOp({ Op0, Op1, Op2 })) 5304 return V; 5305 return nullptr; 5306 } 5307 default: 5308 return nullptr; 5309 } 5310 } 5311 5312 Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) { 5313 Value *Callee = Call->getCalledValue(); 5314 5315 // call undef -> undef 5316 // call null -> undef 5317 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee)) 5318 return UndefValue::get(Call->getType()); 5319 5320 Function *F = dyn_cast<Function>(Callee); 5321 if (!F) 5322 return nullptr; 5323 5324 if (F->isIntrinsic()) 5325 if (Value *Ret = simplifyIntrinsic(Call, Q)) 5326 return Ret; 5327 5328 if (!canConstantFoldCallTo(Call, F)) 5329 return nullptr; 5330 5331 SmallVector<Constant *, 4> ConstantArgs; 5332 unsigned NumArgs = Call->getNumArgOperands(); 5333 ConstantArgs.reserve(NumArgs); 5334 for (auto &Arg : Call->args()) { 5335 Constant *C = dyn_cast<Constant>(&Arg); 5336 if (!C) 5337 return nullptr; 5338 ConstantArgs.push_back(C); 5339 } 5340 5341 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI); 5342 } 5343 5344 /// Given operands for a Freeze, see if we can fold the result. 5345 static Value *SimplifyFreezeInst(Value *Op0) { 5346 // Use a utility function defined in ValueTracking. 5347 if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0)) 5348 return Op0; 5349 // We have room for improvement. 5350 return nullptr; 5351 } 5352 5353 Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) { 5354 return ::SimplifyFreezeInst(Op0); 5355 } 5356 5357 /// See if we can compute a simplified version of this instruction. 5358 /// If not, this returns null. 
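/// For example (illustrative): given "%r = add i32 %x, 0", this returns %x
/// without creating any new instructions.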
5359 5360 Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ, 5361 OptimizationRemarkEmitter *ORE) { 5362 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I); 5363 Value *Result; 5364 5365 switch (I->getOpcode()) { 5366 default: 5367 Result = ConstantFoldInstruction(I, Q.DL, Q.TLI); 5368 break; 5369 case Instruction::FNeg: 5370 Result = SimplifyFNegInst(I->getOperand(0), I->getFastMathFlags(), Q); 5371 break; 5372 case Instruction::FAdd: 5373 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1), 5374 I->getFastMathFlags(), Q); 5375 break; 5376 case Instruction::Add: 5377 Result = 5378 SimplifyAddInst(I->getOperand(0), I->getOperand(1), 5379 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5380 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5381 break; 5382 case Instruction::FSub: 5383 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1), 5384 I->getFastMathFlags(), Q); 5385 break; 5386 case Instruction::Sub: 5387 Result = 5388 SimplifySubInst(I->getOperand(0), I->getOperand(1), 5389 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5390 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5391 break; 5392 case Instruction::FMul: 5393 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1), 5394 I->getFastMathFlags(), Q); 5395 break; 5396 case Instruction::Mul: 5397 Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q); 5398 break; 5399 case Instruction::SDiv: 5400 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q); 5401 break; 5402 case Instruction::UDiv: 5403 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q); 5404 break; 5405 case Instruction::FDiv: 5406 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), 5407 I->getFastMathFlags(), Q); 5408 break; 5409 case Instruction::SRem: 5410 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q); 5411 break; 5412 case Instruction::URem: 5413 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q); 5414 break; 5415 case Instruction::FRem: 5416 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), 5417 I->getFastMathFlags(), Q); 5418 break; 5419 case Instruction::Shl: 5420 Result = 5421 SimplifyShlInst(I->getOperand(0), I->getOperand(1), 5422 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5423 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5424 break; 5425 case Instruction::LShr: 5426 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1), 5427 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q); 5428 break; 5429 case Instruction::AShr: 5430 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1), 5431 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q); 5432 break; 5433 case Instruction::And: 5434 Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q); 5435 break; 5436 case Instruction::Or: 5437 Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q); 5438 break; 5439 case Instruction::Xor: 5440 Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q); 5441 break; 5442 case Instruction::ICmp: 5443 Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), 5444 I->getOperand(0), I->getOperand(1), Q); 5445 break; 5446 case Instruction::FCmp: 5447 Result = 5448 SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0), 5449 I->getOperand(1), I->getFastMathFlags(), Q); 5450 break; 5451 case Instruction::Select: 5452 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1), 5453 I->getOperand(2), Q); 5454 break; 5455 case Instruction::GetElementPtr: { 5456 SmallVector<Value *, 
8> Ops(I->op_begin(), I->op_end());
5457 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
5458 Ops, Q);
5459 break;
5460 }
5461 case Instruction::InsertValue: {
5462 InsertValueInst *IV = cast<InsertValueInst>(I);
5463 Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
5464 IV->getInsertedValueOperand(),
5465 IV->getIndices(), Q);
5466 break;
5467 }
5468 case Instruction::InsertElement: {
5469 auto *IE = cast<InsertElementInst>(I);
5470 Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1),
5471 IE->getOperand(2), Q);
5472 break;
5473 }
5474 case Instruction::ExtractValue: {
5475 auto *EVI = cast<ExtractValueInst>(I);
5476 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
5477 EVI->getIndices(), Q);
5478 break;
5479 }
5480 case Instruction::ExtractElement: {
5481 auto *EEI = cast<ExtractElementInst>(I);
5482 Result = SimplifyExtractElementInst(EEI->getVectorOperand(),
5483 EEI->getIndexOperand(), Q);
5484 break;
5485 }
5486 case Instruction::ShuffleVector: {
5487 auto *SVI = cast<ShuffleVectorInst>(I);
5488 Result = SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
5489 SVI->getMask(), SVI->getType(), Q);
5490 break;
5491 }
5492 case Instruction::PHI:
5493 Result = SimplifyPHINode(cast<PHINode>(I), Q);
5494 break;
5495 case Instruction::Call: {
5496 Result = SimplifyCall(cast<CallInst>(I), Q);
5497 break;
5498 }
5499 case Instruction::Freeze:
5500 Result = SimplifyFreezeInst(I->getOperand(0), Q);
5501 break;
5502 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
5503 #include "llvm/IR/Instruction.def"
5504 #undef HANDLE_CAST_INST
5505 Result =
5506 SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q);
5507 break;
5508 case Instruction::Alloca:
5509 // No simplifications for Alloca and it can't be constant folded.
5510 Result = nullptr;
5511 break;
5512 }
5513
5514 // In general, it is possible for computeKnownBits to determine all bits in a
5515 // value even when the operands are not all constants.
5516 if (!Result && I->getType()->isIntOrIntVectorTy()) {
5517 KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
5518 if (Known.isConstant())
5519 Result = ConstantInt::get(I->getType(), Known.getConstant());
5520 }
5521
5522 /// If called on unreachable code, the above logic may report that the
5523 /// instruction simplified to itself. Make life easier for users by
5524 /// detecting that case here, returning a safe value instead.
5525 return Result == I ? UndefValue::get(I->getType()) : Result;
5526 }
5527
5528 /// Implementation of recursive simplification through an instruction's
5529 /// uses.
5530 ///
5531 /// This is the common implementation of the recursive simplification routines.
5532 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
5533 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
5534 /// instructions to process and attempt to simplify it using
5535 /// InstructionSimplify. Recursively visited users which could not be
5536 /// simplified themselves are added to the optional UnsimplifiedUsers set for
5537 /// further processing by the caller.
5538 ///
5539 /// This routine returns 'true' only when *it* simplifies something. The passed
5540 /// in simplified value does not count toward this.
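/// For example (illustrative): if "%a = add i32 %x, 0" is replaced by %x, a
/// user such as "%b = sub i32 %a, %x" becomes "sub i32 %x, %x" and is in turn
/// simplified to 0 by this routine.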
5541 static bool replaceAndRecursivelySimplifyImpl(
5542 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
5543 const DominatorTree *DT, AssumptionCache *AC,
5544 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
5545 bool Simplified = false;
5546 SmallSetVector<Instruction *, 8> Worklist;
5547 const DataLayout &DL = I->getModule()->getDataLayout();
5548
5549 // If we have an explicit value to collapse to, do that round of the
5550 // simplification loop by hand initially.
5551 if (SimpleV) {
5552 for (User *U : I->users())
5553 if (U != I)
5554 Worklist.insert(cast<Instruction>(U));
5555
5556 // Replace the instruction with its simplified value.
5557 I->replaceAllUsesWith(SimpleV);
5558
5559 // Gracefully handle edge cases where the instruction is not wired into any
5560 // parent block.
5561 if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
5562 !I->mayHaveSideEffects())
5563 I->eraseFromParent();
5564 } else {
5565 Worklist.insert(I);
5566 }
5567
5568 // Note that we must test the size on each iteration, as the worklist can grow.
5569 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
5570 I = Worklist[Idx];
5571
5572 // See if this instruction simplifies.
5573 SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
5574 if (!SimpleV) {
5575 if (UnsimplifiedUsers)
5576 UnsimplifiedUsers->insert(I);
5577 continue;
5578 }
5579
5580 Simplified = true;
5581
5582 // Stash away all the uses of the old instruction so we can check them for
5583 // recursive simplifications after a RAUW. This is cheaper than checking all
5584 // uses of SimpleV on the recursive step in most cases.
5585 for (User *U : I->users())
5586 Worklist.insert(cast<Instruction>(U));
5587
5588 // Replace the instruction with its simplified value.
5589 I->replaceAllUsesWith(SimpleV);
5590
5591 // Gracefully handle edge cases where the instruction is not wired into any
5592 // parent block.
5593 if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
5594 !I->mayHaveSideEffects())
5595 I->eraseFromParent();
5596 }
5597 return Simplified;
5598 }
5599
5600 bool llvm::recursivelySimplifyInstruction(Instruction *I,
5601 const TargetLibraryInfo *TLI,
5602 const DominatorTree *DT,
5603 AssumptionCache *AC) {
5604 return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC, nullptr);
5605 }
5606
5607 bool llvm::replaceAndRecursivelySimplify(
5608 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
5609 const DominatorTree *DT, AssumptionCache *AC,
5610 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
5611 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
5612 assert(SimpleV && "Must provide a simplified value.");
5613 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
5614 UnsimplifiedUsers);
5615 }
5616
5617 namespace llvm {
5618 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
5619 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
5620 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
5621 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
5622 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
5623 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
5624 auto *AC = ACWP ?
&ACWP->getAssumptionCache(F) : nullptr; 5625 return {F.getParent()->getDataLayout(), TLI, DT, AC}; 5626 } 5627 5628 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR, 5629 const DataLayout &DL) { 5630 return {DL, &AR.TLI, &AR.DT, &AR.AC}; 5631 } 5632 5633 template <class T, class... TArgs> 5634 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM, 5635 Function &F) { 5636 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F); 5637 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F); 5638 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F); 5639 return {F.getParent()->getDataLayout(), TLI, DT, AC}; 5640 } 5641 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &, 5642 Function &); 5643 } 5644