1 //===- ValueTracking.cpp - Walk computations to compute properties --------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file contains routines that help analyze properties that chains of 11 // computations have. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Analysis/ValueTracking.h" 16 #include "llvm/ADT/APFloat.h" 17 #include "llvm/ADT/APInt.h" 18 #include "llvm/ADT/ArrayRef.h" 19 #include "llvm/ADT/None.h" 20 #include "llvm/ADT/Optional.h" 21 #include "llvm/ADT/STLExtras.h" 22 #include "llvm/ADT/SmallPtrSet.h" 23 #include "llvm/ADT/SmallSet.h" 24 #include "llvm/ADT/SmallVector.h" 25 #include "llvm/ADT/StringRef.h" 26 #include "llvm/ADT/iterator_range.h" 27 #include "llvm/Analysis/AliasAnalysis.h" 28 #include "llvm/Analysis/AssumptionCache.h" 29 #include "llvm/Analysis/GuardUtils.h" 30 #include "llvm/Analysis/InstructionSimplify.h" 31 #include "llvm/Analysis/Loads.h" 32 #include "llvm/Analysis/LoopInfo.h" 33 #include "llvm/Analysis/OptimizationRemarkEmitter.h" 34 #include "llvm/Analysis/TargetLibraryInfo.h" 35 #include "llvm/IR/Argument.h" 36 #include "llvm/IR/Attributes.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/CallSite.h" 39 #include "llvm/IR/Constant.h" 40 #include "llvm/IR/ConstantRange.h" 41 #include "llvm/IR/Constants.h" 42 #include "llvm/IR/DataLayout.h" 43 #include "llvm/IR/DerivedTypes.h" 44 #include "llvm/IR/DiagnosticInfo.h" 45 #include "llvm/IR/Dominators.h" 46 #include "llvm/IR/Function.h" 47 #include "llvm/IR/GetElementPtrTypeIterator.h" 48 #include "llvm/IR/GlobalAlias.h" 49 #include "llvm/IR/GlobalValue.h" 50 #include "llvm/IR/GlobalVariable.h" 51 #include "llvm/IR/InstrTypes.h" 52 #include "llvm/IR/Instruction.h" 53 #include "llvm/IR/Instructions.h" 54 #include "llvm/IR/IntrinsicInst.h" 55 #include "llvm/IR/Intrinsics.h" 56 #include "llvm/IR/LLVMContext.h" 57 #include "llvm/IR/Metadata.h" 58 #include "llvm/IR/Module.h" 59 #include "llvm/IR/Operator.h" 60 #include "llvm/IR/PatternMatch.h" 61 #include "llvm/IR/Type.h" 62 #include "llvm/IR/User.h" 63 #include "llvm/IR/Value.h" 64 #include "llvm/Support/Casting.h" 65 #include "llvm/Support/CommandLine.h" 66 #include "llvm/Support/Compiler.h" 67 #include "llvm/Support/ErrorHandling.h" 68 #include "llvm/Support/KnownBits.h" 69 #include "llvm/Support/MathExtras.h" 70 #include <algorithm> 71 #include <array> 72 #include <cassert> 73 #include <cstdint> 74 #include <iterator> 75 #include <utility> 76 77 using namespace llvm; 78 using namespace llvm::PatternMatch; 79 80 const unsigned MaxDepth = 6; 81 82 // Controls the number of uses of the value searched for possible 83 // dominating comparisons. 84 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses", 85 cl::Hidden, cl::init(20)); 86 87 /// Returns the bitwidth of the given scalar or pointer type. For vector types, 88 /// returns the element type's bitwidth. 89 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) { 90 if (unsigned BitWidth = Ty->getScalarSizeInBits()) 91 return BitWidth; 92 93 return DL.getIndexTypeSizeInBits(Ty); 94 } 95 96 namespace { 97 98 // Simplifying using an assume can only be done in a particular control-flow 99 // context (the context instruction provides that context). 
If an assume and 100 // the context instruction are not in the same block then the DT helps in 101 // figuring out if we can use it. 102 struct Query { 103 const DataLayout &DL; 104 AssumptionCache *AC; 105 const Instruction *CxtI; 106 const DominatorTree *DT; 107 108 // Unlike the other analyses, this may be a nullptr because not all clients 109 // provide it currently. 110 OptimizationRemarkEmitter *ORE; 111 112 /// Set of assumptions that should be excluded from further queries. 113 /// This is because of the potential for mutual recursion to cause 114 /// computeKnownBits to repeatedly visit the same assume intrinsic. The 115 /// classic case of this is assume(x = y), which will attempt to determine 116 /// bits in x from bits in y, which will attempt to determine bits in y from 117 /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call 118 /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo 119 /// (all of which can call computeKnownBits), and so on. 120 std::array<const Value *, MaxDepth> Excluded; 121 122 /// If true, it is safe to use metadata during simplification. 123 InstrInfoQuery IIQ; 124 125 unsigned NumExcluded = 0; 126 127 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, 128 const DominatorTree *DT, bool UseInstrInfo, 129 OptimizationRemarkEmitter *ORE = nullptr) 130 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {} 131 132 Query(const Query &Q, const Value *NewExcl) 133 : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ), 134 NumExcluded(Q.NumExcluded) { 135 Excluded = Q.Excluded; 136 Excluded[NumExcluded++] = NewExcl; 137 assert(NumExcluded <= Excluded.size()); 138 } 139 140 bool isExcluded(const Value *Value) const { 141 if (NumExcluded == 0) 142 return false; 143 auto End = Excluded.begin() + NumExcluded; 144 return std::find(Excluded.begin(), End, Value) != End; 145 } 146 }; 147 148 } // end anonymous namespace 149 150 // Given the provided Value and, potentially, a context instruction, return 151 // the preferred context instruction (if any). 152 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) { 153 // If we've been provided with a context instruction, then use that (provided 154 // it has been inserted). 155 if (CxtI && CxtI->getParent()) 156 return CxtI; 157 158 // If the value is really an already-inserted instruction, then use that. 
159 CxtI = dyn_cast<Instruction>(V); 160 if (CxtI && CxtI->getParent()) 161 return CxtI; 162 163 return nullptr; 164 } 165 166 static void computeKnownBits(const Value *V, KnownBits &Known, 167 unsigned Depth, const Query &Q); 168 169 void llvm::computeKnownBits(const Value *V, KnownBits &Known, 170 const DataLayout &DL, unsigned Depth, 171 AssumptionCache *AC, const Instruction *CxtI, 172 const DominatorTree *DT, 173 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) { 174 ::computeKnownBits(V, Known, Depth, 175 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE)); 176 } 177 178 static KnownBits computeKnownBits(const Value *V, unsigned Depth, 179 const Query &Q); 180 181 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL, 182 unsigned Depth, AssumptionCache *AC, 183 const Instruction *CxtI, 184 const DominatorTree *DT, 185 OptimizationRemarkEmitter *ORE, 186 bool UseInstrInfo) { 187 return ::computeKnownBits( 188 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE)); 189 } 190 191 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS, 192 const DataLayout &DL, AssumptionCache *AC, 193 const Instruction *CxtI, const DominatorTree *DT, 194 bool UseInstrInfo) { 195 assert(LHS->getType() == RHS->getType() && 196 "LHS and RHS should have the same type"); 197 assert(LHS->getType()->isIntOrIntVectorTy() && 198 "LHS and RHS should be integers"); 199 // Look for an inverted mask: (X & ~M) op (Y & M). 200 Value *M; 201 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) && 202 match(RHS, m_c_And(m_Specific(M), m_Value()))) 203 return true; 204 if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) && 205 match(LHS, m_c_And(m_Specific(M), m_Value()))) 206 return true; 207 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType()); 208 KnownBits LHSKnown(IT->getBitWidth()); 209 KnownBits RHSKnown(IT->getBitWidth()); 210 computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo); 211 computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo); 212 return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue(); 213 } 214 215 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) { 216 for (const User *U : CxtI->users()) { 217 if (const ICmpInst *IC = dyn_cast<ICmpInst>(U)) 218 if (IC->isEquality()) 219 if (Constant *C = dyn_cast<Constant>(IC->getOperand(1))) 220 if (C->isNullValue()) 221 continue; 222 return false; 223 } 224 return true; 225 } 226 227 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, 228 const Query &Q); 229 230 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, 231 bool OrZero, unsigned Depth, 232 AssumptionCache *AC, const Instruction *CxtI, 233 const DominatorTree *DT, bool UseInstrInfo) { 234 return ::isKnownToBeAPowerOfTwo( 235 V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo)); 236 } 237 238 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q); 239 240 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth, 241 AssumptionCache *AC, const Instruction *CxtI, 242 const DominatorTree *DT, bool UseInstrInfo) { 243 return ::isKnownNonZero(V, Depth, 244 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo)); 245 } 246 247 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL, 248 unsigned Depth, AssumptionCache *AC, 249 const Instruction *CxtI, const DominatorTree *DT, 250 bool UseInstrInfo) { 251 KnownBits Known = 252 computeKnownBits(V, DL, Depth, AC, CxtI, DT, 
nullptr, UseInstrInfo); 253 return Known.isNonNegative(); 254 } 255 256 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth, 257 AssumptionCache *AC, const Instruction *CxtI, 258 const DominatorTree *DT, bool UseInstrInfo) { 259 if (auto *CI = dyn_cast<ConstantInt>(V)) 260 return CI->getValue().isStrictlyPositive(); 261 262 // TODO: We're doing two recursive queries here. We should factor this such 263 // that only a single query is needed. 264 return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) && 265 isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo); 266 } 267 268 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth, 269 AssumptionCache *AC, const Instruction *CxtI, 270 const DominatorTree *DT, bool UseInstrInfo) { 271 KnownBits Known = 272 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo); 273 return Known.isNegative(); 274 } 275 276 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q); 277 278 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2, 279 const DataLayout &DL, AssumptionCache *AC, 280 const Instruction *CxtI, const DominatorTree *DT, 281 bool UseInstrInfo) { 282 return ::isKnownNonEqual(V1, V2, 283 Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT, 284 UseInstrInfo, /*ORE=*/nullptr)); 285 } 286 287 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, 288 const Query &Q); 289 290 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask, 291 const DataLayout &DL, unsigned Depth, 292 AssumptionCache *AC, const Instruction *CxtI, 293 const DominatorTree *DT, bool UseInstrInfo) { 294 return ::MaskedValueIsZero( 295 V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo)); 296 } 297 298 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth, 299 const Query &Q); 300 301 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL, 302 unsigned Depth, AssumptionCache *AC, 303 const Instruction *CxtI, 304 const DominatorTree *DT, bool UseInstrInfo) { 305 return ::ComputeNumSignBits( 306 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo)); 307 } 308 309 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, 310 bool NSW, 311 KnownBits &KnownOut, KnownBits &Known2, 312 unsigned Depth, const Query &Q) { 313 unsigned BitWidth = KnownOut.getBitWidth(); 314 315 // If an initial sequence of bits in the result is not needed, the 316 // corresponding bits in the operands are not needed. 317 KnownBits LHSKnown(BitWidth); 318 computeKnownBits(Op0, LHSKnown, Depth + 1, Q); 319 computeKnownBits(Op1, Known2, Depth + 1, Q); 320 321 KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2); 322 } 323 324 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, 325 KnownBits &Known, KnownBits &Known2, 326 unsigned Depth, const Query &Q) { 327 unsigned BitWidth = Known.getBitWidth(); 328 computeKnownBits(Op1, Known, Depth + 1, Q); 329 computeKnownBits(Op0, Known2, Depth + 1, Q); 330 331 bool isKnownNegative = false; 332 bool isKnownNonNegative = false; 333 // If the multiplication is known not to overflow, compute the sign bit. 334 if (NSW) { 335 if (Op0 == Op1) { 336 // The product of a number with itself is non-negative.
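// For illustration: without nsw this need not hold on i8, since 12 * 12 == 144
// wraps to the bit pattern 0b10010000, whose sign bit is set; nsw excludes
// such overflow, so the square really is non-negative.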
337 isKnownNonNegative = true; 338 } else { 339 bool isKnownNonNegativeOp1 = Known.isNonNegative(); 340 bool isKnownNonNegativeOp0 = Known2.isNonNegative(); 341 bool isKnownNegativeOp1 = Known.isNegative(); 342 bool isKnownNegativeOp0 = Known2.isNegative(); 343 // The product of two numbers with the same sign is non-negative. 344 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) || 345 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0); 346 // The product of a negative number and a non-negative number is either 347 // negative or zero. 348 if (!isKnownNonNegative) 349 isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 && 350 isKnownNonZero(Op0, Depth, Q)) || 351 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && 352 isKnownNonZero(Op1, Depth, Q)); 353 } 354 } 355 356 assert(!Known.hasConflict() && !Known2.hasConflict()); 357 // Compute a conservative estimate for high known-0 bits. 358 unsigned LeadZ = std::max(Known.countMinLeadingZeros() + 359 Known2.countMinLeadingZeros(), 360 BitWidth) - BitWidth; 361 LeadZ = std::min(LeadZ, BitWidth); 362 363 // The result of the bottom bits of an integer multiply can be 364 // inferred by looking at the bottom bits of both operands and 365 // multiplying them together. 366 // We can infer at least the minimum number of known trailing bits 367 // of both operands. Depending on number of trailing zeros, we can 368 // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming 369 // a and b are divisible by m and n respectively. 370 // We then calculate how many of those bits are inferrable and set 371 // the output. For example, the i8 mul: 372 // a = XXXX1100 (12) 373 // b = XXXX1110 (14) 374 // We know the bottom 3 bits are zero since the first can be divided by 375 // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4). 376 // Applying the multiplication to the trimmed arguments gets: 377 // XX11 (3) 378 // X111 (7) 379 // ------- 380 // XX11 381 // XX11 382 // XX11 383 // XX11 384 // ------- 385 // XXXXX01 386 // Which allows us to infer the 2 LSBs. Since we're multiplying the result 387 // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits. 388 // The proof for this can be described as: 389 // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) && 390 // (C7 == (1 << (umin(countTrailingZeros(C1), C5) + 391 // umin(countTrailingZeros(C2), C6) + 392 // umin(C5 - umin(countTrailingZeros(C1), C5), 393 // C6 - umin(countTrailingZeros(C2), C6)))) - 1) 394 // %aa = shl i8 %a, C5 395 // %bb = shl i8 %b, C6 396 // %aaa = or i8 %aa, C1 397 // %bbb = or i8 %bb, C2 398 // %mul = mul i8 %aaa, %bbb 399 // %mask = and i8 %mul, C7 400 // => 401 // %mask = i8 ((C1*C2)&C7) 402 // Where C5, C6 describe the known bits of %a, %b 403 // C1, C2 describe the known bottom bits of %a, %b. 404 // C7 describes the mask of the known bits of the result. 405 APInt Bottom0 = Known.One; 406 APInt Bottom1 = Known2.One; 407 408 // How many times we'd be able to divide each argument by 2 (shr by 1). 409 // This gives us the number of trailing zeros on the multiplication result. 410 unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes(); 411 unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes(); 412 unsigned TrailZero0 = Known.countMinTrailingZeros(); 413 unsigned TrailZero1 = Known2.countMinTrailingZeros(); 414 unsigned TrailZ = TrailZero0 + TrailZero1; 415 416 // Figure out the fewest known-bits operand. 
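// For illustration, continuing the i8 example above: TrailBitsKnown0 == 4,
// TrailZero0 == 2, TrailBitsKnown1 == 4, TrailZero1 == 1, so SmallestOperand
// == min(2, 3) == 2 and ResultBitsKnown == min(2 + 3, 8) == 5, matching the
// five bits inferred above.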
417 unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0, 418 TrailBitsKnown1 - TrailZero1); 419 unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth); 420 421 APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) * 422 Bottom1.getLoBits(TrailBitsKnown1); 423 424 Known.resetAll(); 425 Known.Zero.setHighBits(LeadZ); 426 Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown); 427 Known.One |= BottomKnown.getLoBits(ResultBitsKnown); 428 429 // Only make use of no-wrap flags if we failed to compute the sign bit 430 // directly. This matters if the multiplication always overflows, in 431 // which case we prefer to follow the result of the direct computation, 432 // though as the program is invoking undefined behaviour we can choose 433 // whatever we like here. 434 if (isKnownNonNegative && !Known.isNegative()) 435 Known.makeNonNegative(); 436 else if (isKnownNegative && !Known.isNonNegative()) 437 Known.makeNegative(); 438 } 439 440 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges, 441 KnownBits &Known) { 442 unsigned BitWidth = Known.getBitWidth(); 443 unsigned NumRanges = Ranges.getNumOperands() / 2; 444 assert(NumRanges >= 1); 445 446 Known.Zero.setAllBits(); 447 Known.One.setAllBits(); 448 449 for (unsigned i = 0; i < NumRanges; ++i) { 450 ConstantInt *Lower = 451 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0)); 452 ConstantInt *Upper = 453 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1)); 454 ConstantRange Range(Lower->getValue(), Upper->getValue()); 455 456 // The first CommonPrefixBits of all values in Range are equal. 457 unsigned CommonPrefixBits = 458 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros(); 459 460 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits); 461 Known.One &= Range.getUnsignedMax() & Mask; 462 Known.Zero &= ~Range.getUnsignedMax() & Mask; 463 } 464 } 465 466 static bool isEphemeralValueOf(const Instruction *I, const Value *E) { 467 SmallVector<const Value *, 16> WorkSet(1, I); 468 SmallPtrSet<const Value *, 32> Visited; 469 SmallPtrSet<const Value *, 16> EphValues; 470 471 // The instruction defining an assumption's condition itself is always 472 // considered ephemeral to that assumption (even if it has other 473 // non-ephemeral users). See r246696's test case for an example. 474 if (is_contained(I->operands(), E)) 475 return true; 476 477 while (!WorkSet.empty()) { 478 const Value *V = WorkSet.pop_back_val(); 479 if (!Visited.insert(V).second) 480 continue; 481 482 // If all uses of this value are ephemeral, then so is this value. 483 if (llvm::all_of(V->users(), [&](const User *U) { 484 return EphValues.count(U); 485 })) { 486 if (V == E) 487 return true; 488 489 if (V == I || isSafeToSpeculativelyExecute(V)) { 490 EphValues.insert(V); 491 if (const User *U = dyn_cast<User>(V)) 492 for (User::const_op_iterator J = U->op_begin(), JE = U->op_end(); 493 J != JE; ++J) 494 WorkSet.push_back(*J); 495 } 496 } 497 } 498 499 return false; 500 } 501 502 // Is this an intrinsic that cannot be speculated but also cannot trap? 503 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) { 504 if (const CallInst *CI = dyn_cast<CallInst>(I)) 505 if (Function *F = CI->getCalledFunction()) 506 switch (F->getIntrinsicID()) { 507 default: break; 508 // FIXME: This list is repeated from NoTTI::getIntrinsicCost. 
509 case Intrinsic::assume: 510 case Intrinsic::sideeffect: 511 case Intrinsic::dbg_declare: 512 case Intrinsic::dbg_value: 513 case Intrinsic::dbg_label: 514 case Intrinsic::invariant_start: 515 case Intrinsic::invariant_end: 516 case Intrinsic::lifetime_start: 517 case Intrinsic::lifetime_end: 518 case Intrinsic::objectsize: 519 case Intrinsic::ptr_annotation: 520 case Intrinsic::var_annotation: 521 return true; 522 } 523 524 return false; 525 } 526 527 bool llvm::isValidAssumeForContext(const Instruction *Inv, 528 const Instruction *CxtI, 529 const DominatorTree *DT) { 530 // There are two restrictions on the use of an assume: 531 // 1. The assume must dominate the context (or the control flow must 532 // reach the assume whenever it reaches the context). 533 // 2. The context must not be in the assume's set of ephemeral values 534 // (otherwise we will use the assume to prove that the condition 535 // feeding the assume is trivially true, thus causing the removal of 536 // the assume). 537 538 if (DT) { 539 if (DT->dominates(Inv, CxtI)) 540 return true; 541 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) { 542 // We don't have a DT, but this trivially dominates. 543 return true; 544 } 545 546 // With or without a DT, the only remaining case we will check is if the 547 // instructions are in the same BB. Give up if that is not the case. 548 if (Inv->getParent() != CxtI->getParent()) 549 return false; 550 551 // If we have a dom tree, then we now know that the assume doesn't dominate 552 // the other instruction. If we don't have a dom tree then we can check if 553 // the assume is first in the BB. 554 if (!DT) { 555 // Search forward from the assume until we reach the context (or the end 556 // of the block); the common case is that the assume will come first. 557 for (auto I = std::next(BasicBlock::const_iterator(Inv)), 558 IE = Inv->getParent()->end(); I != IE; ++I) 559 if (&*I == CxtI) 560 return true; 561 } 562 563 // The context comes first, but they're both in the same block. Make sure 564 // there is nothing in between that might interrupt the control flow. 565 for (BasicBlock::const_iterator I = 566 std::next(BasicBlock::const_iterator(CxtI)), IE(Inv); 567 I != IE; ++I) 568 if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I)) 569 return false; 570 571 return !isEphemeralValueOf(Inv, CxtI); 572 } 573 574 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known, 575 unsigned Depth, const Query &Q) { 576 // Use of assumptions is context-sensitive. If we don't have a context, we 577 // cannot use them! 578 if (!Q.AC || !Q.CxtI) 579 return; 580 581 unsigned BitWidth = Known.getBitWidth(); 582 583 // Note that the patterns below need to be kept in sync with the code 584 // in AssumptionCache::updateAffectedValues. 585 586 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) { 587 if (!AssumeVH) 588 continue; 589 CallInst *I = cast<CallInst>(AssumeVH); 590 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() && 591 "Got assumption for the wrong function!"); 592 if (Q.isExcluded(I)) 593 continue; 594 595 // Warning: This loop can end up being somewhat performance sensitive. 596 // We're running this loop once for each value queried, resulting in a 597 // runtime of ~O(#assumes * #values).
598 599 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && 600 "must be an assume intrinsic"); 601 602 Value *Arg = I->getArgOperand(0); 603 604 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 605 assert(BitWidth == 1 && "assume operand is not i1?"); 606 Known.setAllOnes(); 607 return; 608 } 609 if (match(Arg, m_Not(m_Specific(V))) && 610 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 611 assert(BitWidth == 1 && "assume operand is not i1?"); 612 Known.setAllZero(); 613 return; 614 } 615 616 // The remaining tests are all recursive, so bail out if we hit the limit. 617 if (Depth == MaxDepth) 618 continue; 619 620 Value *A, *B; 621 auto m_V = m_CombineOr(m_Specific(V), 622 m_CombineOr(m_PtrToInt(m_Specific(V)), 623 m_BitCast(m_Specific(V)))); 624 625 CmpInst::Predicate Pred; 626 uint64_t C; 627 // assume(v = a) 628 if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) && 629 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 630 KnownBits RHSKnown(BitWidth); 631 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 632 Known.Zero |= RHSKnown.Zero; 633 Known.One |= RHSKnown.One; 634 // assume(v & b = a) 635 } else if (match(Arg, 636 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) && 637 Pred == ICmpInst::ICMP_EQ && 638 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 639 KnownBits RHSKnown(BitWidth); 640 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 641 KnownBits MaskKnown(BitWidth); 642 computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I)); 643 644 // For those bits in the mask that are known to be one, we can propagate 645 // known bits from the RHS to V. 646 Known.Zero |= RHSKnown.Zero & MaskKnown.One; 647 Known.One |= RHSKnown.One & MaskKnown.One; 648 // assume(~(v & b) = a) 649 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))), 650 m_Value(A))) && 651 Pred == ICmpInst::ICMP_EQ && 652 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 653 KnownBits RHSKnown(BitWidth); 654 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 655 KnownBits MaskKnown(BitWidth); 656 computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I)); 657 658 // For those bits in the mask that are known to be one, we can propagate 659 // inverted known bits from the RHS to V. 660 Known.Zero |= RHSKnown.One & MaskKnown.One; 661 Known.One |= RHSKnown.Zero & MaskKnown.One; 662 // assume(v | b = a) 663 } else if (match(Arg, 664 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) && 665 Pred == ICmpInst::ICMP_EQ && 666 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 667 KnownBits RHSKnown(BitWidth); 668 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 669 KnownBits BKnown(BitWidth); 670 computeKnownBits(B, BKnown, Depth+1, Query(Q, I)); 671 672 // For those bits in B that are known to be zero, we can propagate known 673 // bits from the RHS to V. 674 Known.Zero |= RHSKnown.Zero & BKnown.Zero; 675 Known.One |= RHSKnown.One & BKnown.Zero; 676 // assume(~(v | b) = a) 677 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))), 678 m_Value(A))) && 679 Pred == ICmpInst::ICMP_EQ && 680 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 681 KnownBits RHSKnown(BitWidth); 682 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 683 KnownBits BKnown(BitWidth); 684 computeKnownBits(B, BKnown, Depth+1, Query(Q, I)); 685 686 // For those bits in B that are known to be zero, we can propagate 687 // inverted known bits from the RHS to V. 
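// For illustration: where bit i of B is known zero, bit i of (v | b) equals
// bit i of v, so ~(v | b) == a forces bit i of v to be the complement of bit
// i of a.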
688 Known.Zero |= RHSKnown.One & BKnown.Zero; 689 Known.One |= RHSKnown.Zero & BKnown.Zero; 690 // assume(v ^ b = a) 691 } else if (match(Arg, 692 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) && 693 Pred == ICmpInst::ICMP_EQ && 694 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 695 KnownBits RHSKnown(BitWidth); 696 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 697 KnownBits BKnown(BitWidth); 698 computeKnownBits(B, BKnown, Depth+1, Query(Q, I)); 699 700 // For those bits in B that are known to be zero, we can propagate known 701 // bits from the RHS to V. For those bits in B that are known to be one, 702 // we can propagate inverted known bits from the RHS to V. 703 Known.Zero |= RHSKnown.Zero & BKnown.Zero; 704 Known.One |= RHSKnown.One & BKnown.Zero; 705 Known.Zero |= RHSKnown.One & BKnown.One; 706 Known.One |= RHSKnown.Zero & BKnown.One; 707 // assume(~(v ^ b) = a) 708 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))), 709 m_Value(A))) && 710 Pred == ICmpInst::ICMP_EQ && 711 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 712 KnownBits RHSKnown(BitWidth); 713 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 714 KnownBits BKnown(BitWidth); 715 computeKnownBits(B, BKnown, Depth+1, Query(Q, I)); 716 717 // For those bits in B that are known to be zero, we can propagate 718 // inverted known bits from the RHS to V. For those bits in B that are 719 // known to be one, we can propagate known bits from the RHS to V. 720 Known.Zero |= RHSKnown.One & BKnown.Zero; 721 Known.One |= RHSKnown.Zero & BKnown.Zero; 722 Known.Zero |= RHSKnown.Zero & BKnown.One; 723 Known.One |= RHSKnown.One & BKnown.One; 724 // assume(v << c = a) 725 } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)), 726 m_Value(A))) && 727 Pred == ICmpInst::ICMP_EQ && 728 isValidAssumeForContext(I, Q.CxtI, Q.DT) && 729 C < BitWidth) { 730 KnownBits RHSKnown(BitWidth); 731 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 732 // For those bits in RHS that are known, we can propagate them to known 733 // bits in V shifted to the right by C. 734 RHSKnown.Zero.lshrInPlace(C); 735 Known.Zero |= RHSKnown.Zero; 736 RHSKnown.One.lshrInPlace(C); 737 Known.One |= RHSKnown.One; 738 // assume(~(v << c) = a) 739 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))), 740 m_Value(A))) && 741 Pred == ICmpInst::ICMP_EQ && 742 isValidAssumeForContext(I, Q.CxtI, Q.DT) && 743 C < BitWidth) { 744 KnownBits RHSKnown(BitWidth); 745 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 746 // For those bits in RHS that are known, we can propagate them inverted 747 // to known bits in V shifted to the right by C. 748 RHSKnown.One.lshrInPlace(C); 749 Known.Zero |= RHSKnown.One; 750 RHSKnown.Zero.lshrInPlace(C); 751 Known.One |= RHSKnown.Zero; 752 // assume(v >> c = a) 753 } else if (match(Arg, 754 m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)), 755 m_Value(A))) && 756 Pred == ICmpInst::ICMP_EQ && 757 isValidAssumeForContext(I, Q.CxtI, Q.DT) && 758 C < BitWidth) { 759 KnownBits RHSKnown(BitWidth); 760 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 761 // For those bits in RHS that are known, we can propagate them to known 762 // bits in V shifted to the left by C.
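// For illustration: on i8, assume((v >> 2) == 0x0F) makes every bit of a
// known (00001111); shifting left by 2 pins bits 5:2 of v to one and bits
// 7:6 to zero, while bits 1:0 of v remain unknown.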
763 Known.Zero |= RHSKnown.Zero << C; 764 Known.One |= RHSKnown.One << C; 765 // assume(~(v >> c) = a) 766 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))), 767 m_Value(A))) && 768 Pred == ICmpInst::ICMP_EQ && 769 isValidAssumeForContext(I, Q.CxtI, Q.DT) && 770 C < BitWidth) { 771 KnownBits RHSKnown(BitWidth); 772 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 773 // For those bits in RHS that are known, we can propagate them inverted 774 // to known bits in V shifted to the left by C. 775 Known.Zero |= RHSKnown.One << C; 776 Known.One |= RHSKnown.Zero << C; 777 // assume(v >=_s c) where c is non-negative 778 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 779 Pred == ICmpInst::ICMP_SGE && 780 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 781 KnownBits RHSKnown(BitWidth); 782 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 783 784 if (RHSKnown.isNonNegative()) { 785 // We know that the sign bit is zero. 786 Known.makeNonNegative(); 787 } 788 // assume(v >_s c) where c is at least -1. 789 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 790 Pred == ICmpInst::ICMP_SGT && 791 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 792 KnownBits RHSKnown(BitWidth); 793 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 794 795 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) { 796 // We know that the sign bit is zero. 797 Known.makeNonNegative(); 798 } 799 // assume(v <=_s c) where c is negative 800 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 801 Pred == ICmpInst::ICMP_SLE && 802 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 803 KnownBits RHSKnown(BitWidth); 804 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 805 806 if (RHSKnown.isNegative()) { 807 // We know that the sign bit is one. 808 Known.makeNegative(); 809 } 810 // assume(v <_s c) where c is non-positive 811 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 812 Pred == ICmpInst::ICMP_SLT && 813 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 814 KnownBits RHSKnown(BitWidth); 815 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 816 817 if (RHSKnown.isZero() || RHSKnown.isNegative()) { 818 // We know that the sign bit is one. 819 Known.makeNegative(); 820 } 821 // assume(v <=_u c) 822 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 823 Pred == ICmpInst::ICMP_ULE && 824 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 825 KnownBits RHSKnown(BitWidth); 826 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 827 828 // Whatever high bits in c are zero are known to be zero. 829 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros()); 830 // assume(v <_u c) 831 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 832 Pred == ICmpInst::ICMP_ULT && 833 isValidAssumeForContext(I, Q.CxtI, Q.DT)) { 834 KnownBits RHSKnown(BitWidth); 835 computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I)); 836 837 // If the RHS is known zero, then this assumption must be wrong (nothing 838 // is unsigned less than zero). Signal a conflict and get out of here. 839 if (RHSKnown.isZero()) { 840 Known.Zero.setAllBits(); 841 Known.One.setAllBits(); 842 break; 843 } 844 845 // Whatever high bits in c are zero are known to be zero (if c is a power 846 // of 2, then one more). 847 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I))) 848 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1); 849 else 850 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros()); 851 } 852 } 853 854 // If assumptions conflict with each other or previous known bits, then we 855 // have a logical fallacy.
It's possible that the assumption is not reachable, 856 // so this isn't a real bug. On the other hand, the program may have undefined 857 // behavior, or we might have a bug in the compiler. We can't assert/crash, so 858 // clear out the known bits, try to warn the user, and hope for the best. 859 if (Known.Zero.intersects(Known.One)) { 860 Known.resetAll(); 861 862 if (Q.ORE) 863 Q.ORE->emit([&]() { 864 auto *CxtI = const_cast<Instruction *>(Q.CxtI); 865 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption", 866 CxtI) 867 << "Detected conflicting code assumptions. Program may " 868 "have undefined behavior, or compiler may have " 869 "internal error."; 870 }); 871 } 872 } 873 874 /// Compute known bits from a shift operator, including those with a 875 /// non-constant shift amount. Known is the output of this function. Known2 is a 876 /// pre-allocated temporary with the same bit width as Known. KZF and KOF are 877 /// operator-specific functions that, given the known-zero or known-one bits 878 /// respectively, and a shift amount, compute the implied known-zero or 879 /// known-one bits of the shift operator's result respectively for that shift 880 /// amount. The results from calling KZF and KOF are conservatively combined for 881 /// all permitted shift amounts. 882 static void computeKnownBitsFromShiftOperator( 883 const Operator *I, KnownBits &Known, KnownBits &Known2, 884 unsigned Depth, const Query &Q, 885 function_ref<APInt(const APInt &, unsigned)> KZF, 886 function_ref<APInt(const APInt &, unsigned)> KOF) { 887 unsigned BitWidth = Known.getBitWidth(); 888 889 if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { 890 unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1); 891 892 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 893 Known.Zero = KZF(Known.Zero, ShiftAmt); 894 Known.One = KOF(Known.One, ShiftAmt); 895 // If the known bits conflict, this must be an overflowing left shift, so 896 // the shift result is poison. We can return anything we want. Choose 0 for 897 // the best folding opportunity. 898 if (Known.hasConflict()) 899 Known.setAllZero(); 900 901 return; 902 } 903 904 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q); 905 906 // If the shift amount could be greater than or equal to the bit-width of the 907 // LHS, the value could be poison, but bail out because the check below is 908 // expensive. TODO: Should we just carry on? 909 if ((~Known.Zero).uge(BitWidth)) { 910 Known.resetAll(); 911 return; 912 } 913 914 // Note: We cannot use Known.Zero.getLimitedValue() here, because if 915 // BitWidth > 64 and any upper bits are known, we'll end up returning the 916 // limit value (which implies all bits are known). 917 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue(); 918 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue(); 919 920 // It would be more-clearly correct to use the two temporaries for this 921 // calculation. Reusing the APInts here to prevent unnecessary allocations. 922 Known.resetAll(); 923 924 // If we know the shifter operand is nonzero, we can sometimes infer more 925 // known bits. However this is expensive to compute, so be lazy about it and 926 // only compute it when absolutely necessary. 927 Optional<bool> ShifterOperandIsNonZero; 928 929 // Early exit if we can't constrain any well-defined shift amount. 
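// For illustration: with BitWidth == 32, PowerOf2Ceil(BitWidth) - 1 == 31
// masks the five bits that select an in-range shift amount. If neither
// known-zero nor known-one bits land in that mask, the loop below cannot
// exclude any in-range amount, so the only fact worth computing is whether
// the shift amount is known non-zero.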
930 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) && 931 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) { 932 ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q); 933 if (!*ShifterOperandIsNonZero) 934 return; 935 } 936 937 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 938 939 Known.Zero.setAllBits(); 940 Known.One.setAllBits(); 941 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) { 942 // Combine the shifted known input bits only for those shift amounts 943 // compatible with its known constraints. 944 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt) 945 continue; 946 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt) 947 continue; 948 // If we know the shifter is nonzero, we may be able to infer more known 949 // bits. This check is sunk down as far as possible to avoid the expensive 950 // call to isKnownNonZero if the cheaper checks above fail. 951 if (ShiftAmt == 0) { 952 if (!ShifterOperandIsNonZero.hasValue()) 953 ShifterOperandIsNonZero = 954 isKnownNonZero(I->getOperand(1), Depth + 1, Q); 955 if (*ShifterOperandIsNonZero) 956 continue; 957 } 958 959 Known.Zero &= KZF(Known2.Zero, ShiftAmt); 960 Known.One &= KOF(Known2.One, ShiftAmt); 961 } 962 963 // If the known bits conflict, the result is poison. Return a 0 and hope the 964 // caller can further optimize that. 965 if (Known.hasConflict()) 966 Known.setAllZero(); 967 } 968 969 static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known, 970 unsigned Depth, const Query &Q) { 971 unsigned BitWidth = Known.getBitWidth(); 972 973 KnownBits Known2(Known); 974 switch (I->getOpcode()) { 975 default: break; 976 case Instruction::Load: 977 if (MDNode *MD = 978 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range)) 979 computeKnownBitsFromRangeMetadata(*MD, Known); 980 break; 981 case Instruction::And: { 982 // If either the LHS or the RHS are Zero, the result is zero. 983 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q); 984 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 985 986 // Output known-1 bits are only known if set in both the LHS & RHS. 987 Known.One &= Known2.One; 988 // Output known-0 are known to be clear if zero in either the LHS | RHS. 989 Known.Zero |= Known2.Zero; 990 991 // and(x, add (x, -1)) is a common idiom that always clears the low bit; 992 // here we handle the more general case of adding any odd number by 993 // matching the form add(x, add(x, y)) where y is odd. 994 // TODO: This could be generalized to clearing any bit set in y where the 995 // following bit is known to be unset in y. 996 Value *X = nullptr, *Y = nullptr; 997 if (!Known.Zero[0] && !Known.One[0] && 998 match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) { 999 Known2.resetAll(); 1000 computeKnownBits(Y, Known2, Depth + 1, Q); 1001 if (Known2.countMinTrailingOnes() > 0) 1002 Known.Zero.setBit(0); 1003 } 1004 break; 1005 } 1006 case Instruction::Or: 1007 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q); 1008 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1009 1010 // Output known-0 bits are only known if clear in both the LHS & RHS. 1011 Known.Zero &= Known2.Zero; 1012 // Output known-1 are known to be set if set in either the LHS | RHS. 1013 Known.One |= Known2.One; 1014 break; 1015 case Instruction::Xor: { 1016 computeKnownBits(I->getOperand(1), Known, Depth + 1, Q); 1017 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1018 1019 // Output known-0 bits are known if clear or set in both the LHS & RHS. 
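// (0 ^ 0 == 1 ^ 1 == 0, hence "clear or set in both"; 0 ^ 1 == 1 ^ 0 == 1
// gives the known-one rule below.)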
1020 APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One); 1021 // Output known-1 are known to be set if set in only one of the LHS, RHS. 1022 Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero); 1023 Known.Zero = std::move(KnownZeroOut); 1024 break; 1025 } 1026 case Instruction::Mul: { 1027 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); 1028 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known, 1029 Known2, Depth, Q); 1030 break; 1031 } 1032 case Instruction::UDiv: { 1033 // For the purposes of computing leading zeros we can conservatively 1034 // treat a udiv as a logical right shift by the power of 2 known to 1035 // be less than the denominator. 1036 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1037 unsigned LeadZ = Known2.countMinLeadingZeros(); 1038 1039 Known2.resetAll(); 1040 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1041 unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros(); 1042 if (RHSMaxLeadingZeros != BitWidth) 1043 LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1); 1044 1045 Known.Zero.setHighBits(LeadZ); 1046 break; 1047 } 1048 case Instruction::Select: { 1049 const Value *LHS, *RHS; 1050 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor; 1051 if (SelectPatternResult::isMinOrMax(SPF)) { 1052 computeKnownBits(RHS, Known, Depth + 1, Q); 1053 computeKnownBits(LHS, Known2, Depth + 1, Q); 1054 } else { 1055 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q); 1056 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1057 } 1058 1059 unsigned MaxHighOnes = 0; 1060 unsigned MaxHighZeros = 0; 1061 if (SPF == SPF_SMAX) { 1062 // If both sides are negative, the result is negative. 1063 if (Known.isNegative() && Known2.isNegative()) 1064 // We can derive a lower bound on the result by taking the max of the 1065 // leading one bits. 1066 MaxHighOnes = 1067 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes()); 1068 // If either side is non-negative, the result is non-negative. 1069 else if (Known.isNonNegative() || Known2.isNonNegative()) 1070 MaxHighZeros = 1; 1071 } else if (SPF == SPF_SMIN) { 1072 // If both sides are non-negative, the result is non-negative. 1073 if (Known.isNonNegative() && Known2.isNonNegative()) 1074 // We can derive an upper bound on the result by taking the max of the 1075 // leading zero bits. 1076 MaxHighZeros = std::max(Known.countMinLeadingZeros(), 1077 Known2.countMinLeadingZeros()); 1078 // If either side is negative, the result is negative. 1079 else if (Known.isNegative() || Known2.isNegative()) 1080 MaxHighOnes = 1; 1081 } else if (SPF == SPF_UMAX) { 1082 // We can derive a lower bound on the result by taking the max of the 1083 // leading one bits. 1084 MaxHighOnes = 1085 std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes()); 1086 } else if (SPF == SPF_UMIN) { 1087 // We can derive an upper bound on the result by taking the max of the 1088 // leading zero bits. 1089 MaxHighZeros = 1090 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 1091 } else if (SPF == SPF_ABS) { 1092 // RHS from matchSelectPattern returns the negation part of abs pattern. 1093 // If the negate has an NSW flag we can assume the sign bit of the result 1094 // will be 0 because that makes abs(INT_MIN) undefined. 1095 if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS))) 1096 MaxHighZeros = 1; 1097 } 1098 1099 // Only known if known in both the LHS and RHS. 
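// For illustration: a bit that is one on both arms of the select is one in
// the result no matter which arm is chosen; likewise for zero bits.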
1100 Known.One &= Known2.One; 1101 Known.Zero &= Known2.Zero; 1102 if (MaxHighOnes > 0) 1103 Known.One.setHighBits(MaxHighOnes); 1104 if (MaxHighZeros > 0) 1105 Known.Zero.setHighBits(MaxHighZeros); 1106 break; 1107 } 1108 case Instruction::FPTrunc: 1109 case Instruction::FPExt: 1110 case Instruction::FPToUI: 1111 case Instruction::FPToSI: 1112 case Instruction::SIToFP: 1113 case Instruction::UIToFP: 1114 break; // Can't work with floating point. 1115 case Instruction::PtrToInt: 1116 case Instruction::IntToPtr: 1117 // Fall through and handle them the same as zext/trunc. 1118 LLVM_FALLTHROUGH; 1119 case Instruction::ZExt: 1120 case Instruction::Trunc: { 1121 Type *SrcTy = I->getOperand(0)->getType(); 1122 1123 unsigned SrcBitWidth; 1124 // Note that we handle pointer operands here because of inttoptr/ptrtoint 1125 // which fall through here. 1126 Type *ScalarTy = SrcTy->getScalarType(); 1127 SrcBitWidth = ScalarTy->isPointerTy() ? 1128 Q.DL.getIndexTypeSizeInBits(ScalarTy) : 1129 Q.DL.getTypeSizeInBits(ScalarTy); 1130 1131 assert(SrcBitWidth && "SrcBitWidth can't be zero"); 1132 Known = Known.zextOrTrunc(SrcBitWidth); 1133 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1134 Known = Known.zextOrTrunc(BitWidth); 1135 // Any top bits are known to be zero. 1136 if (BitWidth > SrcBitWidth) 1137 Known.Zero.setBitsFrom(SrcBitWidth); 1138 break; 1139 } 1140 case Instruction::BitCast: { 1141 Type *SrcTy = I->getOperand(0)->getType(); 1142 if (SrcTy->isIntOrPtrTy() && 1143 // TODO: For now, not handling conversions like: 1144 // (bitcast i64 %x to <2 x i32>) 1145 !I->getType()->isVectorTy()) { 1146 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1147 break; 1148 } 1149 break; 1150 } 1151 case Instruction::SExt: { 1152 // Compute the bits in the result that are not present in the input. 1153 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits(); 1154 1155 Known = Known.trunc(SrcBitWidth); 1156 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1157 // If the sign bit of the input is known set or clear, then we know the 1158 // top bits of the result. 1159 Known = Known.sext(BitWidth); 1160 break; 1161 } 1162 case Instruction::Shl: { 1163 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0 1164 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); 1165 auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) { 1166 APInt KZResult = KnownZero << ShiftAmt; 1167 KZResult.setLowBits(ShiftAmt); // Low bits known 0. 1168 // If this shift has "nsw" keyword, then the result is either a poison 1169 // value or has the same sign bit as the first operand. 1170 if (NSW && KnownZero.isSignBitSet()) 1171 KZResult.setSignBit(); 1172 return KZResult; 1173 }; 1174 1175 auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) { 1176 APInt KOResult = KnownOne << ShiftAmt; 1177 if (NSW && KnownOne.isSignBitSet()) 1178 KOResult.setSignBit(); 1179 return KOResult; 1180 }; 1181 1182 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF); 1183 break; 1184 } 1185 case Instruction::LShr: { 1186 // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 1187 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) { 1188 APInt KZResult = KnownZero.lshr(ShiftAmt); 1189 // High bits known zero. 
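// (E.g. lshr i8 by 3 always produces a value whose bits 7:5 are zero,
// regardless of the input.)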
1190 KZResult.setHighBits(ShiftAmt); 1191 return KZResult; 1192 }; 1193 1194 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) { 1195 return KnownOne.lshr(ShiftAmt); 1196 }; 1197 1198 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF); 1199 break; 1200 } 1201 case Instruction::AShr: { 1202 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 1203 auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) { 1204 return KnownZero.ashr(ShiftAmt); 1205 }; 1206 1207 auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) { 1208 return KnownOne.ashr(ShiftAmt); 1209 }; 1210 1211 computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF); 1212 break; 1213 } 1214 case Instruction::Sub: { 1215 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); 1216 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, 1217 Known, Known2, Depth, Q); 1218 break; 1219 } 1220 case Instruction::Add: { 1221 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I)); 1222 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, 1223 Known, Known2, Depth, Q); 1224 break; 1225 } 1226 case Instruction::SRem: 1227 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) { 1228 APInt RA = Rem->getValue().abs(); 1229 if (RA.isPowerOf2()) { 1230 APInt LowBits = RA - 1; 1231 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1232 1233 // The low bits of the first operand are unchanged by the srem. 1234 Known.Zero = Known2.Zero & LowBits; 1235 Known.One = Known2.One & LowBits; 1236 1237 // If the first operand is non-negative or has all low bits zero, then 1238 // the upper bits are all zero. 1239 if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero)) 1240 Known.Zero |= ~LowBits; 1241 1242 // If the first operand is negative and not all low bits are zero, then 1243 // the upper bits are all one. 1244 if (Known2.isNegative() && LowBits.intersects(Known2.One)) 1245 Known.One |= ~LowBits; 1246 1247 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 1248 break; 1249 } 1250 } 1251 1252 // The sign bit is the LHS's sign bit, except when the result of the 1253 // remainder is zero. 1254 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1255 // If it's known zero, our sign bit is also zero. 1256 if (Known2.isNonNegative()) 1257 Known.makeNonNegative(); 1258 1259 break; 1260 case Instruction::URem: { 1261 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) { 1262 const APInt &RA = Rem->getValue(); 1263 if (RA.isPowerOf2()) { 1264 APInt LowBits = (RA - 1); 1265 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1266 Known.Zero |= ~LowBits; 1267 Known.One &= LowBits; 1268 break; 1269 } 1270 } 1271 1272 // Since the result is less than or equal to either operand, any leading 1273 // zero bits in either operand must also exist in the result. 
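// For illustration: on i8, if either the dividend fits in 6 bits or the
// divisor fits in 6 bits, then the remainder (<= the dividend and < the
// divisor) also fits in 6 bits, so its top two bits are known zero.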
1274 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1275 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1276 1277 unsigned Leaders = 1278 std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros()); 1279 Known.resetAll(); 1280 Known.Zero.setHighBits(Leaders); 1281 break; 1282 } 1283 1284 case Instruction::Alloca: { 1285 const AllocaInst *AI = cast<AllocaInst>(I); 1286 unsigned Align = AI->getAlignment(); 1287 if (Align == 0) 1288 Align = Q.DL.getABITypeAlignment(AI->getAllocatedType()); 1289 1290 if (Align > 0) 1291 Known.Zero.setLowBits(countTrailingZeros(Align)); 1292 break; 1293 } 1294 case Instruction::GetElementPtr: { 1295 // Analyze all of the subscripts of this getelementptr instruction 1296 // to determine if we can prove known low zero bits. 1297 KnownBits LocalKnown(BitWidth); 1298 computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q); 1299 unsigned TrailZ = LocalKnown.countMinTrailingZeros(); 1300 1301 gep_type_iterator GTI = gep_type_begin(I); 1302 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) { 1303 Value *Index = I->getOperand(i); 1304 if (StructType *STy = GTI.getStructTypeOrNull()) { 1305 // Handle struct member offset arithmetic. 1306 1307 // Handle case when index is vector zeroinitializer 1308 Constant *CIndex = cast<Constant>(Index); 1309 if (CIndex->isZeroValue()) 1310 continue; 1311 1312 if (CIndex->getType()->isVectorTy()) 1313 Index = CIndex->getSplatValue(); 1314 1315 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue(); 1316 const StructLayout *SL = Q.DL.getStructLayout(STy); 1317 uint64_t Offset = SL->getElementOffset(Idx); 1318 TrailZ = std::min<unsigned>(TrailZ, 1319 countTrailingZeros(Offset)); 1320 } else { 1321 // Handle array index arithmetic. 1322 Type *IndexedTy = GTI.getIndexedType(); 1323 if (!IndexedTy->isSized()) { 1324 TrailZ = 0; 1325 break; 1326 } 1327 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits(); 1328 uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy); 1329 LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0); 1330 computeKnownBits(Index, LocalKnown, Depth + 1, Q); 1331 TrailZ = std::min(TrailZ, 1332 unsigned(countTrailingZeros(TypeSize) + 1333 LocalKnown.countMinTrailingZeros())); 1334 } 1335 } 1336 1337 Known.Zero.setLowBits(TrailZ); 1338 break; 1339 } 1340 case Instruction::PHI: { 1341 const PHINode *P = cast<PHINode>(I); 1342 // Handle the case of a simple two-predecessor recurrence PHI. 1343 // There's a lot more that could theoretically be done here, but 1344 // this is sufficient to catch some interesting cases. 1345 if (P->getNumIncomingValues() == 2) { 1346 for (unsigned i = 0; i != 2; ++i) { 1347 Value *L = P->getIncomingValue(i); 1348 Value *R = P->getIncomingValue(!i); 1349 Operator *LU = dyn_cast<Operator>(L); 1350 if (!LU) 1351 continue; 1352 unsigned Opcode = LU->getOpcode(); 1353 // Check for operations that have the property that if 1354 // both their operands have low zero bits, the result 1355 // will have low zero bits. 1356 if (Opcode == Instruction::Add || 1357 Opcode == Instruction::Sub || 1358 Opcode == Instruction::And || 1359 Opcode == Instruction::Or || 1360 Opcode == Instruction::Mul) { 1361 Value *LL = LU->getOperand(0); 1362 Value *LR = LU->getOperand(1); 1363 // Find a recurrence. 1364 if (LL == I) 1365 L = LR; 1366 else if (LR == I) 1367 L = LL; 1368 else 1369 break; 1370 // Ok, we have a PHI of the form L op= R. Check for low 1371 // zero bits. 
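// For illustration: for i32 %i = phi [ 0, %entry ], [ %i.next, %loop ] with
// %i.next = add i32 %i, 4, both the start value 0 and the step 4 have at
// least two trailing zeros, so %i is known to have two trailing zeros too.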
1372 computeKnownBits(R, Known2, Depth + 1, Q); 1373 1374 // We need to take the minimum number of known bits. 1375 KnownBits Known3(Known); 1376 computeKnownBits(L, Known3, Depth + 1, Q); 1377 1378 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(), 1379 Known3.countMinTrailingZeros())); 1380 1381 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU); 1382 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) { 1383 // If the initial value of the recurrence is nonnegative, and we are adding 1384 // a nonnegative number with nsw, the result can only be nonnegative 1385 // or a poison value regardless of the number of times we execute the 1386 // add in the phi recurrence. If the initial value is negative and we are 1387 // adding a negative number with nsw, the result can only be 1388 // negative or a poison value. Similar arguments apply to sub and mul. 1389 // 1390 // (add non-negative, non-negative) --> non-negative 1391 // (add negative, negative) --> negative 1392 if (Opcode == Instruction::Add) { 1393 if (Known2.isNonNegative() && Known3.isNonNegative()) 1394 Known.makeNonNegative(); 1395 else if (Known2.isNegative() && Known3.isNegative()) 1396 Known.makeNegative(); 1397 } 1398 1399 // (sub nsw non-negative, negative) --> non-negative 1400 // (sub nsw negative, non-negative) --> negative 1401 else if (Opcode == Instruction::Sub && LL == I) { 1402 if (Known2.isNonNegative() && Known3.isNegative()) 1403 Known.makeNonNegative(); 1404 else if (Known2.isNegative() && Known3.isNonNegative()) 1405 Known.makeNegative(); 1406 } 1407 1408 // (mul nsw non-negative, non-negative) --> non-negative 1409 else if (Opcode == Instruction::Mul && Known2.isNonNegative() && 1410 Known3.isNonNegative()) 1411 Known.makeNonNegative(); 1412 } 1413 1414 break; 1415 } 1416 } 1417 } 1418 1419 // Unreachable blocks may have zero-operand PHI nodes. 1420 if (P->getNumIncomingValues() == 0) 1421 break; 1422 1423 // Otherwise take the unions of the known bit sets of the operands, 1424 // taking conservative care to avoid excessive recursion. 1425 if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) { 1426 // Skip if every incoming value references the PHI itself. 1427 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue())) 1428 break; 1429 1430 Known.Zero.setAllBits(); 1431 Known.One.setAllBits(); 1432 for (Value *IncValue : P->incoming_values()) { 1433 // Skip direct self references. 1434 if (IncValue == P) continue; 1435 1436 Known2 = KnownBits(BitWidth); 1437 // Recurse, but cap the recursion to one level, because we don't 1438 // want to waste time spinning around in loops. 1439 computeKnownBits(IncValue, Known2, MaxDepth - 1, Q); 1440 Known.Zero &= Known2.Zero; 1441 Known.One &= Known2.One; 1442 // If all bits have been ruled out, there's no need to check 1443 // more operands. 1444 if (!Known.Zero && !Known.One) 1445 break; 1446 } 1447 } 1448 break; 1449 } 1450 case Instruction::Call: 1451 case Instruction::Invoke: 1452 // If range metadata is attached to this call, set known bits from that, 1453 // and then intersect with known bits based on other properties of the 1454 // function.
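// For illustration: !range !{i8 0, i8 32} on a call pins the top three bits
// of the result to zero, since every value in [0, 32) shares that three-bit
// prefix.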
1455 if (MDNode *MD = 1456 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range)) 1457 computeKnownBitsFromRangeMetadata(*MD, Known); 1458 if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) { 1459 computeKnownBits(RV, Known2, Depth + 1, Q); 1460 Known.Zero |= Known2.Zero; 1461 Known.One |= Known2.One; 1462 } 1463 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 1464 switch (II->getIntrinsicID()) { 1465 default: break; 1466 case Intrinsic::bitreverse: 1467 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1468 Known.Zero |= Known2.Zero.reverseBits(); 1469 Known.One |= Known2.One.reverseBits(); 1470 break; 1471 case Intrinsic::bswap: 1472 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1473 Known.Zero |= Known2.Zero.byteSwap(); 1474 Known.One |= Known2.One.byteSwap(); 1475 break; 1476 case Intrinsic::ctlz: { 1477 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1478 // If we have a known 1, its position is our upper bound. 1479 unsigned PossibleLZ = Known2.One.countLeadingZeros(); 1480 // If this call is undefined for 0, the result will be less than 2^n. 1481 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) 1482 PossibleLZ = std::min(PossibleLZ, BitWidth - 1); 1483 unsigned LowBits = Log2_32(PossibleLZ)+1; 1484 Known.Zero.setBitsFrom(LowBits); 1485 break; 1486 } 1487 case Intrinsic::cttz: { 1488 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1489 // If we have a known 1, its position is our upper bound. 1490 unsigned PossibleTZ = Known2.One.countTrailingZeros(); 1491 // If this call is undefined for 0, the result will be less than 2^n. 1492 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) 1493 PossibleTZ = std::min(PossibleTZ, BitWidth - 1); 1494 unsigned LowBits = Log2_32(PossibleTZ)+1; 1495 Known.Zero.setBitsFrom(LowBits); 1496 break; 1497 } 1498 case Intrinsic::ctpop: { 1499 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1500 // We can bound the space the count needs. Also, bits known to be zero 1501 // can't contribute to the population. 1502 unsigned BitsPossiblySet = Known2.countMaxPopulation(); 1503 unsigned LowBits = Log2_32(BitsPossiblySet)+1; 1504 Known.Zero.setBitsFrom(LowBits); 1505 // TODO: we could bound KnownOne using the lower bound on the number 1506 // of bits which might be set provided by popcnt KnownOne2. 1507 break; 1508 } 1509 case Intrinsic::x86_sse42_crc32_64_64: 1510 Known.Zero.setBitsFrom(32); 1511 break; 1512 } 1513 } 1514 break; 1515 case Instruction::ExtractElement: 1516 // Look through extract element. At the moment we keep this simple and skip 1517 // tracking the specific element. But at least we might find information 1518 // valid for all elements of the vector (for example if the vector is sign 1519 // extended, shifted, etc).
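// For illustration: if the vector operand is an 'and' with a splat of 255,
// every element has its top bits above bit 7 clear, so any extracted lane
// does too.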
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              Known, Known2, Depth, Q);
          break;
        }
      }
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
          V->getType()->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");

  Type *ScalarTy = V->getType()->getScalarType();
  unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
    Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
  assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
  (void)BitWidth;
  (void)ExpectedWidth;

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known.One = *C;
    Known.Zero = ~Known.One;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
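  // Worked example (illustrative): for <2 x i8> <i8 4, i8 5> the elements are
  // 0b00000100 and 0b00000101, so the intersection leaves Known.One =
  // 0b00000100 and Known.Zero = 0b11111010; only bit 0 stays unknown.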
  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      APInt Elt = CDS->getElementAsAPInt(i);
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        Known.resetAll();
        return;
      }
      const APInt &Elt = ElementCI->getValue();
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  // Start out not knowing anything.
  Known.resetAll();

  // We can't imply anything about undefs.
  if (isa<UndefValue>(V))
    return;

  // There's no point in looking through other users of ConstantData for
  // assumptions. Confirm that we've handled them all.
  assert(!isa<ConstantData>(V) && "Unhandled constant data!");

  // Limit search depth.
  // All recursive calls that increase depth must come after this.
  if (Depth == MaxDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable())
      computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
    return;
  }

  if (const Operator *I = dyn_cast<Operator>(V))
    computeKnownBitsFromOperator(I, Known, Depth, Q);

  // Aligned pointers have trailing zeros; refine the Known.Zero set.
  if (V->getType()->isPointerTy()) {
    unsigned Align = V->getPointerAlignment(Q.DL);
    if (Align)
      Known.Zero.setLowBits(countTrailingZeros(Align));
  }

  // computeKnownBitsFromAssume strictly refines Known, so we run it after
  // computeKnownBitsFromOperator.

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, Known, Depth, Q);

  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
}

/// Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                            const Query &Q) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  // Attempt to match against constants.
  if (OrZero && match(V, m_Power2OrZero()))
    return true;
  if (match(V, m_Power2()))
    return true;

  // 1 << X is clearly a power of two if the one is not shifted off the end.
  // If it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signmask) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
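  // For example (i8 sketch): lshr i8 -128, %x starts from 0b10000000 and
  // keeps exactly one bit set for %x in [0, 7], e.g. %x = 3 gives 0b00010000.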
  if (match(V, m_LShr(m_SignMask(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  Value *X = nullptr, *Y = nullptr;
  // A shift left or a logical shift right of a power of two is a power of two
  // or zero.
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_LShr(m_Value(X), m_Value()))))
    return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);

  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);

  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
    return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
           isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
      return true;
    // X & (-X) is always a power of two or zero.
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // Adding a power-of-two or zero to the same power-of-two or zero yields
  // either the original power-of-two, a larger power-of-two or zero.
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
    if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
        Q.IIQ.hasNoSignedWrap(VOBO)) {
      if (match(X, m_And(m_Specific(Y), m_Value())) ||
          match(X, m_And(m_Value(), m_Specific(Y))))
        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
          return true;
      if (match(Y, m_And(m_Specific(X), m_Value())) ||
          match(Y, m_And(m_Value(), m_Specific(X))))
        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
          return true;

      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      KnownBits LHSBits(BitWidth);
      computeKnownBits(X, LHSBits, Depth, Q);

      KnownBits RHSBits(BitWidth);
      computeKnownBits(Y, RHSBits, Depth, Q);
      // If i8 V is a power of two or zero:
      //  ZeroBits: 1 1 1 0 1 1 1 1
      // ~ZeroBits: 0 0 0 1 0 0 0 0
      if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
        // If OrZero isn't set, we cannot give back a zero result.
        // Make sure either the LHS or RHS has a bit set.
        if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
          return true;
    }
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
                                  Depth, Q);
  }

  return false;
}

/// Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
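///
/// Illustrative example (a sketch): in address space 0,
///   %p = getelementptr inbounds i32, i32* %base, i64 1
/// can be proven non-null either because %base is known non-null, or because
/// the constant non-zero offset alone already rules out a null result.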
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
                              const Query &Q) {
  const Function *F = nullptr;
  if (const Instruction *I = dyn_cast<Instruction>(GEP))
    F = I->getFunction();

  if (!GEP->isInBounds() ||
      NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
    return true;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = Q.DL.getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't want
    // to recurse 10k times just because we have 10k GEP operands. We don't
    // bail out completely because we want to handle constant GEPs regardless
    // of depth.
    if (Depth++ >= MaxDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
      return true;
  }

  return false;
}

static bool isKnownNonNullFromDominatingCondition(const Value *V,
                                                  const Instruction *CtxI,
                                                  const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() && "V must be pointer type");
  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");

  if (!CtxI || !DT)
    return false;

  unsigned NumUsesExplored = 0;
  for (auto *U : V->users()) {
    // Avoid massive lists.
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;

    // If the value is used as an argument to a call or invoke, then argument
    // attributes may provide an answer about null-ness.
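    // For instance (hypothetical IR): if V is passed here
    //   call void @use(i8* nonnull %v)
    // and the call dominates CtxI, then V cannot be null at CtxI, since
    // violating the nonnull attribute would already have been UB.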
    if (auto CS = ImmutableCallSite(U))
      if (auto *CalledFunc = CS.getCalledFunction())
        for (const Argument &Arg : CalledFunc->args())
          if (CS.getArgOperand(Arg.getArgNo()) == V &&
              Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
            return true;

    // Consider only compare instructions uniquely controlling a branch.
    CmpInst::Predicate Pred;
    if (!match(const_cast<User *>(U),
               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
      continue;

    SmallVector<const User *, 4> WorkList;
    SmallPtrSet<const User *, 4> Visited;
    for (auto *CmpU : U->users()) {
      assert(WorkList.empty() && "Should be!");
      if (Visited.insert(CmpU).second)
        WorkList.push_back(CmpU);

      while (!WorkList.empty()) {
        auto *Curr = WorkList.pop_back_val();

        // If a user is an AND, add all its users to the work list. We only
        // propagate the "pred != null" condition through an AND because it is
        // only correct to assume that all operands of the AND hold on the
        // true branch.
        // TODO: Support similar logic for OR and the EQ predicate?
        if (Pred == ICmpInst::ICMP_NE)
          if (auto *BO = dyn_cast<BinaryOperator>(Curr))
            if (BO->getOpcode() == Instruction::And) {
              for (auto *BOU : BO->users())
                if (Visited.insert(BOU).second)
                  WorkList.push_back(BOU);
              continue;
            }

        if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
          assert(BI->isConditional() && "uses a comparison!");

          BasicBlock *NonNullSuccessor =
              BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
          BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
          if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
            return true;
        } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
                   DT->dominates(cast<Instruction>(Curr), CtxI)) {
          return true;
        }
      }
    }
  }

  return false;
}

/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value?
static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;

    // For constant vectors, check that all elements are undefined or known
    // non-zero to determine that the whole vector is known non-zero.
    if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
        Constant *Elt = C->getAggregateElement(i);
        if (!Elt || Elt->isNullValue())
          return false;
        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
          return false;
      }
      return true;
    }

    // A global variable in address space 0 is non-null unless it is extern
    // weak or an absolute symbol reference. Other address spaces may have
    // null as a valid address for a global, so we can't assume anything.
    if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
      if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
          GV->getType()->getAddressSpace() == 0)
        return true;
    } else
      return false;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
      // If the possible ranges don't contain zero, then the value is
      // definitely non-zero.
      if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
        const APInt ZeroValue(Ty->getBitWidth(), 0);
        if (rangeMetadataExcludesValue(Ranges, ZeroValue))
          return true;
      }
    }
  }

  // Some of the tests below are recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  // Check for pointer simplifications.
  if (V->getType()->isPointerTy()) {
    // Alloca never returns null, malloc might.
    if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
      return true;

    // A byval, inalloca, or nonnull argument is never null.
    if (const Argument *A = dyn_cast<Argument>(V))
      if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
        return true;

    // A load tagged with nonnull metadata is never null.
    if (const LoadInst *LI = dyn_cast<LoadInst>(V))
      if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
        return true;

    if (auto CS = ImmutableCallSite(V)) {
      if (CS.isReturnNonNull())
        return true;
      if (const auto *RP = getArgumentAliasingToReturnedPointer(CS))
        return isKnownNonZero(RP, Depth, Q);
    }
  }

  // Check for recursive pointer simplifications.
  if (V->getType()->isPointerTy()) {
    if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
      return true;

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (isGEPKnownNonNull(GEP, Depth, Q))
        return true;
  }

  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = nullptr, *Y = nullptr;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);

  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
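    // e.g. (shl nuw i8 %x, 3) could only become zero by shifting a set bit
    // out, which nuw forbids, so it is zero iff %x is zero. Without nuw we
    // fall back to the odd-X check below.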
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (Q.IIQ.hasNoUnsignedWrap(BO))
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known(BitWidth);
    computeKnownBits(X, Known, Depth, Q);
    if (Known.One[0])
      return true;
  }
  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known = computeKnownBits(X, Depth, Q);
    if (Known.isNegative())
      return true;

    // If the shifter operand is a constant, and all of the bits shifted
    // out are known to be zero, and X is known non-zero then at least one
    // non-zero bit must remain.
    if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
      auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
      // Is there a known one in the portion not shifted out?
      if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
        return true;
      // Are all the bits to be shifted out known zero?
      if (Known.countMinTrailingZeros() >= ShiftVal)
        return isKnownNonZero(X, Depth, Q);
    }
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, Depth, Q);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    KnownBits XKnown = computeKnownBits(X, Depth, Q);
    KnownBits YKnown = computeKnownBits(Y, Depth, Q);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnown.isNonNegative() && YKnown.isNonNegative())
      if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (XKnown.isNegative() && YKnown.isNegative()) {
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set. If some other bit is set then X is not equal
      // to INT_MIN.
      if (XKnown.One.intersects(Mask))
        return true;
      // The sign bit of Y is set. If some other bit is set then Y is not equal
      // to INT_MIN.
      if (YKnown.One.intersects(Mask))
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
      return true;
    if (YKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
        isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
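  // Sketch: %s = select i1 %c, i32 %x, i32 %y can only ever produce one of
  // its two arms, so proving both arms non-zero proves %s non-zero.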
  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
        isKnownNonZero(SI->getFalseValue(), Depth, Q))
      return true;
  }
  // PHI
  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    // Try to detect a recurrence that monotonically increases from a
    // starting value, as these are common as induction variables.
    if (PN->getNumIncomingValues() == 2) {
      Value *Start = PN->getIncomingValue(0);
      Value *Induction = PN->getIncomingValue(1);
      if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
        std::swap(Start, Induction);
      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
        if (!C->isZero() && !C->isNegative()) {
          ConstantInt *X;
          if (Q.IIQ.UseInstrInfo &&
              (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
               match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
              !X->isNegative())
            return true;
        }
      }
    }
    // Check if all incoming values are non-zero constants.
    bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
      return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
    });
    if (AllNonZeroConstants)
      return true;
  }

  KnownBits Known(BitWidth);
  computeKnownBits(V, Known, Depth, Q);
  return Known.One != 0;
}

/// Return true if V2 == V1 + X, where X is known non-zero.
static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
  if (!BO || BO->getOpcode() != Instruction::Add)
    return false;
  Value *Op = nullptr;
  if (V2 == BO->getOperand(0))
    Op = BO->getOperand(1);
  else if (V2 == BO->getOperand(1))
    Op = BO->getOperand(0);
  else
    return false;
  return isKnownNonZero(Op, 0, Q);
}

/// Return true if it is known that V1 != V2.
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
  if (V1 == V2)
    return false;
  if (V1->getType() != V2->getType())
    // We can't look through casts yet.
    return false;
  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
    return true;

  if (V1->getType()->isIntOrIntVectorTy()) {
    // Are any known bits in V1 contradictory to known bits in V2? If V1
    // has a known zero where V2 has a known one, they must not be equal.
    KnownBits Known1 = computeKnownBits(V1, 0, Q);
    KnownBits Known2 = computeKnownBits(V2, 0, Q);

    if (Known1.Zero.intersects(Known2.One) ||
        Known2.Zero.intersects(Known1.One))
      return true;
  }
  return false;
}

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and a bit is set only if it is true for all of the elements in the vector.
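///
/// Illustrative use: with Mask == 0x3, this returns true for
/// V = (shl i32 %x, 2), since both low bits of V are provably zero.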
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                       const Query &Q) {
  KnownBits Known(Mask.getBitWidth());
  computeKnownBits(V, Known, Depth, Q);
  return Mask.isSubsetOf(Known.Zero);
}

// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
// Returns the input and lower/upper bounds.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
                                const APInt *&CLow, const APInt *&CHigh) {
  assert(isa<Operator>(Select) &&
         cast<Operator>(Select)->getOpcode() == Instruction::Select &&
         "Input should be a Select!");

  const Value *LHS, *RHS, *LHS2, *RHS2;
  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
    return false;

  if (!match(RHS, m_APInt(CLow)))
    return false;

  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
  if (getInverseMinMaxFlavor(SPF) != SPF2)
    return false;

  if (!match(RHS2, m_APInt(CHigh)))
    return false;

  if (SPF == SPF_SMIN)
    std::swap(CLow, CHigh);

  In = LHS2;
  return CLow->sle(*CHigh);
}

/// For vector constants, loop over the elements and find the constant with the
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
static unsigned computeNumSignBitsVectorConstant(const Value *V,
                                                 unsigned TyBits) {
  const auto *CV = dyn_cast<Constant>(V);
  if (!CV || !CV->getType()->isVectorTy())
    return 0;

  unsigned MinSignBits = TyBits;
  unsigned NumElts = CV->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    // If we find a non-ConstantInt, bail out.
    auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
    if (!Elt)
      return 0;

    MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
  }

  return MinSignBits;
}

static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
  assert(Result > 0 && "At least one sign bit needs to be present!");
  return Result;
}

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign bit
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits.
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  // We return the minimum number of sign bits that are guaranteed to be
  // present in V, so for undef we have to conservatively return 1. We don't
  // have the same behavior for poison though -- that's a FIXME today.

  Type *ScalarTy = V->getType()->getScalarType();
  unsigned TyBits = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general computeKnownBits case
  // below.

  if (Depth == MaxDepth)
    return 1;  // Limit search depth.

  const Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;

  case Instruction::SDiv: {
    const APInt *Denominator;
    // sdiv X, C -> adds log(C) sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits.
      unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Add floor(log(C)) bits to the numerator bits.
      return std::min(TyBits, NumBits + Denominator->logBase2());
    }
    break;
  }

  case Instruction::SRem: {
    const APInt *Denominator;
    // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant. This lets us put a lower bound on the number of sign
    // bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits. SRem by a positive constant
      // can't lower the number of sign bits.
      unsigned NumrBits =
          ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Calculate the leading sign bit constraints by examining the
      // denominator. Given that the denominator is positive, there are two
      // cases:
      //
      //  1. The numerator is positive. The result range is [0,C) and
      //     [0,C) u< (1 << ceilLogBase2(C)).
      //
      //  2. The numerator is negative. Then the result range is (-C,0] and
      //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
      //
      // Thus a lower bound on the number of sign bits is `TyBits -
      // ceilLogBase2(C)`.

      unsigned ResBits = TyBits - Denominator->ceilLogBase2();
      return std::max(NumrBits, ResBits);
    }
    break;
  }

  case Instruction::AShr: {
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    // ashr X, C -> adds C sign bits. Vectors too.
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      if (ShAmt->uge(TyBits))
        break; // Bad shift.
      unsigned ShAmtLimited = ShAmt->getZExtValue();
      Tmp += ShAmtLimited;
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  }
  case Instruction::Shl: {
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      if (ShAmt->uge(TyBits) ||   // Bad shift.
          ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
      Tmp2 = ShAmt->getZExtValue();
      return Tmp - Tmp2;
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor: // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
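    // e.g. for i8: and-ing 0b11110000 (4 sign bits) with 0b11111100 (6 sign
    // bits) gives 0b11110000, which still has 4 sign bits -- the minimum of
    // the two inputs.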
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select: {
    // If we have a clamp pattern, we know that the number of sign bits will
    // be the minimum of the clamp min/max range.
    const Value *X;
    const APInt *CLow, *CHigh;
    if (isSignedMinMaxClamp(U, X, CLow, CHigh))
      return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());

    Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp == 1) break;
    Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
    return std::min(Tmp, Tmp2);
  }

  case Instruction::Add:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) break;

    // Special case decrementing a value (ADD X, -1):
    if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);

        // If the input is known to be 0 or 1, the output is 0/-1, which is
        // all sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (Known.isNonNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) break;
    return std::min(Tmp, Tmp2) - 1;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) break;

    // Handle NEG.
    if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
        // If the input is known to be 0 or 1, the output is 0/-1, which is
        // all sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the
        // input.
        if (Known.isNonNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) break;
    return std::min(Tmp, Tmp2) - 1;

  case Instruction::Mul: {
    // The output of the Mul can be at most twice the valid bits in the
    // inputs.
    unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (SignBitsOp0 == 1) break;
    unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (SignBitsOp1 == 1) break;
    unsigned OutValidBits =
        (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
    return OutValidBits > TyBits ?
        1 : TyBits - OutValidBits + 1;
  }

  case Instruction::PHI: {
    const PHINode *PN = cast<PHINode>(U);
    unsigned NumIncomingValues = PN->getNumIncomingValues();
    // Don't analyze large in-degree PHIs.
    if (NumIncomingValues > 4) break;
    // Unreachable blocks may have zero-operand PHI nodes.
    if (NumIncomingValues == 0) break;

    // Take the minimum of all incoming values. This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
    for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(
          Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;

  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and
    // skip tracking the specific element. But at least we might find
    // information valid for all elements of the vector (for example if the
    // vector is sign-extended, shifted, etc).
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.

  // If we can examine all elements of a vector constant successfully, we're
  // done (we can't do any better than that). If not, keep trying.
  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
    return VecSignBits;

  KnownBits Known(TyBits);
  computeKnownBits(V, Known, Depth, Q);

  // If we know that the sign bit is either zero or one, determine the number
  // of identical bits in the top of the input value.
  return std::max(FirstAnswer, Known.countMinSignBits());
}

/// This function computes the integer multiple of Base that equals V. If
/// successful, it returns true and stores the multiple in Multiple. If
/// unsuccessful, it returns false. It looks through SExt instructions only if
/// LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    // Multiple is 1.
    Multiple = ConstantInt::get(T, 1);
    return true;
  }

  if (CI && CI->getZExtValue() % Base == 0) {
    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
    return true;
  }

  if (Depth == MaxDepth) return false;  // Limit search depth.
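
  // Worked example (illustrative): for V = mul i32 %n, 3 with Base == 3, the
  // recursion below finds that the constant operand divides out to 1, so %n
  // itself is returned as the multiple; a plain constant such as V = i32 12
  // with Base == 4 was already handled above, yielding i32 3.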

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // Otherwise fall through to ZExt.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth+1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1.
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }

    Value *Mul0 = nullptr;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1).
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1.
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = nullptr;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0).
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0.
          Multiple = Op0;
          return true;
        }
    }
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}

Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
                                            const TargetLibraryInfo *TLI) {
  const Function *F = ICS.getCalledFunction();
  if (!F)
    return Intrinsic::not_intrinsic;

  if (F->isIntrinsic())
    return F->getIntrinsicID();

  if (!TLI)
    return Intrinsic::not_intrinsic;

  LibFunc Func;
  // We're going to make assumptions about the semantics of the functions;
  // check that the target knows the function is available in this environment
  // and that it does not have local linkage.
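  // Sketch of the mapping below (illustrative): a read-only call such as
  //   %r = call double @sqrt(double %x)
  // that TLI recognizes is reported as Intrinsic::sqrt, so callers can reason
  // about it as if it were the intrinsic itself.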
  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
    return Intrinsic::not_intrinsic;

  if (!ICS.onlyReadsMemory())
    return Intrinsic::not_intrinsic;

  // Otherwise check if we have a call to a function that can be turned into a
  // vector intrinsic.
  switch (Func) {
  default:
    break;
  case LibFunc_sin:
  case LibFunc_sinf:
  case LibFunc_sinl:
    return Intrinsic::sin;
  case LibFunc_cos:
  case LibFunc_cosf:
  case LibFunc_cosl:
    return Intrinsic::cos;
  case LibFunc_exp:
  case LibFunc_expf:
  case LibFunc_expl:
    return Intrinsic::exp;
  case LibFunc_exp2:
  case LibFunc_exp2f:
  case LibFunc_exp2l:
    return Intrinsic::exp2;
  case LibFunc_log:
  case LibFunc_logf:
  case LibFunc_logl:
    return Intrinsic::log;
  case LibFunc_log10:
  case LibFunc_log10f:
  case LibFunc_log10l:
    return Intrinsic::log10;
  case LibFunc_log2:
  case LibFunc_log2f:
  case LibFunc_log2l:
    return Intrinsic::log2;
  case LibFunc_fabs:
  case LibFunc_fabsf:
  case LibFunc_fabsl:
    return Intrinsic::fabs;
  case LibFunc_fmin:
  case LibFunc_fminf:
  case LibFunc_fminl:
    return Intrinsic::minnum;
  case LibFunc_fmax:
  case LibFunc_fmaxf:
  case LibFunc_fmaxl:
    return Intrinsic::maxnum;
  case LibFunc_copysign:
  case LibFunc_copysignf:
  case LibFunc_copysignl:
    return Intrinsic::copysign;
  case LibFunc_floor:
  case LibFunc_floorf:
  case LibFunc_floorl:
    return Intrinsic::floor;
  case LibFunc_ceil:
  case LibFunc_ceilf:
  case LibFunc_ceill:
    return Intrinsic::ceil;
  case LibFunc_trunc:
  case LibFunc_truncf:
  case LibFunc_truncl:
    return Intrinsic::trunc;
  case LibFunc_rint:
  case LibFunc_rintf:
  case LibFunc_rintl:
    return Intrinsic::rint;
  case LibFunc_nearbyint:
  case LibFunc_nearbyintf:
  case LibFunc_nearbyintl:
    return Intrinsic::nearbyint;
  case LibFunc_round:
  case LibFunc_roundf:
  case LibFunc_roundl:
    return Intrinsic::round;
  case LibFunc_pow:
  case LibFunc_powf:
  case LibFunc_powl:
    return Intrinsic::pow;
  case LibFunc_sqrt:
  case LibFunc_sqrtf:
  case LibFunc_sqrtl:
    return Intrinsic::sqrt;
  }

  return Intrinsic::not_intrinsic;
}

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                                unsigned Depth) {
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  // Limit search depth.
  if (Depth == MaxDepth)
    return false;

  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return false;

  // Check if the nsz fast-math flag is set.
  if (auto *FPO = dyn_cast<FPMathOperator>(Op))
    if (FPO->hasNoSignedZeros())
      return true;

  // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
    return true;

  // sitofp and uitofp turn into +0.0 for zero.
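  // e.g. sitofp i32 0 to double yields +0.0 (never -0.0), and any non-zero
  // integer converts to a non-zero value, so these casts can never produce
  // -0.0.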
  if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
    return true;

  if (auto *Call = dyn_cast<CallInst>(Op)) {
    Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
    switch (IID) {
    default:
      break;
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    case Intrinsic::sqrt:
    case Intrinsic::canonicalize:
      return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
    // fabs(x) != -0.0
    case Intrinsic::fabs:
      return true;
    }
  }

  return false;
}

/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
/// standard ordered compare. e.g. -0.0 olt 0.0 would be treated as true
/// because of the sign bit, even though the two values compare equal.
static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
                                            const TargetLibraryInfo *TLI,
                                            bool SignBitOnly,
                                            unsigned Depth) {
  // TODO: This function does not do the right thing when SignBitOnly is true
  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
  // which flips the sign bits of NaNs. See
  // https://llvm.org/bugs/show_bug.cgi?id=31702.

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    return !CFP->getValueAPF().isNegative() ||
           (!SignBitOnly && CFP->getValueAPF().isZero());
  }

  // Handle vector of constants.
  if (auto *CV = dyn_cast<Constant>(V)) {
    if (CV->getType()->isVectorTy()) {
      unsigned NumElts = CV->getType()->getVectorNumElements();
      for (unsigned i = 0; i != NumElts; ++i) {
        auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
        if (!CFP)
          return false;
        if (CFP->getValueAPF().isNegative() &&
            (SignBitOnly || !CFP->getValueAPF().isZero()))
          return false;
      }

      // All non-negative ConstantFPs.
      return true;
    }
  }

  if (Depth == MaxDepth)
    return false;  // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (!I)
    return false;

  switch (I->getOpcode()) {
  default:
    break;
  // Unsigned integers are always nonnegative.
  case Instruction::UIToFP:
    return true;
  case Instruction::FMul:
    // x*x is always non-negative or a NaN.
    if (I->getOperand(0) == I->getOperand(1) &&
        (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
      return true;

    LLVM_FALLTHROUGH;
  case Instruction::FAdd:
  case Instruction::FDiv:
  case Instruction::FRem:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Select:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::FPExt:
  case Instruction::FPTrunc:
    // Widening/narrowing never change sign.
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector.
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Call:
    const auto *CI = cast<CallInst>(I);
    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
    switch (IID) {
    default:
      break;
    case Intrinsic::maxnum:
      return (isKnownNeverNaN(I->getOperand(0), TLI) &&
              cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
                                              SignBitOnly, Depth + 1)) ||
             (isKnownNeverNaN(I->getOperand(1), TLI) &&
              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
                                              SignBitOnly, Depth + 1));

    case Intrinsic::minnum:
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
                                             SignBitOnly, Depth + 1) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
                                             SignBitOnly, Depth + 1);
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::fabs:
      return true;

    case Intrinsic::sqrt:
      // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
      if (!SignBitOnly)
        return true;
      return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
                                 CannotBeNegativeZero(CI->getOperand(0), TLI));

    case Intrinsic::powi:
      if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // powi(x,n) is non-negative if n is even.
        if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
          return true;
      }
      // TODO: This is not correct. Given that exp is an integer, here are the
      // ways that pow can return a negative value:
      //
      //   pow(x, exp)    --> negative if exp is odd and x is negative.
      //   pow(-0, exp)   --> -inf if exp is negative odd.
      //   pow(-0, exp)   --> -0 if exp is positive odd.
      //   pow(-inf, exp) --> -0 if exp is negative odd.
      //   pow(-inf, exp) --> -inf if exp is positive odd.
      //
      // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
      // but we must return false if x == -0. Unfortunately we do not currently
      // have a way of expressing this constraint. See details in
      // https://llvm.org/bugs/show_bug.cgi?id=31702.
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
                                             SignBitOnly, Depth + 1);

    case Intrinsic::fma:
    case Intrinsic::fmuladd:
      // x*x+y is non-negative if y is non-negative.
      return I->getOperand(0) == I->getOperand(1) &&
             (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI,
                                             SignBitOnly, Depth + 1);
    }
    break;
  }
  return false;
}

bool llvm::CannotBeOrderedLessThanZero(const Value *V,
                                       const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
}

bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
}

bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
                           unsigned Depth) {
  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");

  // If we're told that NaNs won't happen, assume they won't.
  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
    if (FPMathOp->hasNoNaNs())
      return true;

  // Handle scalar constants.
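  // e.g. ConstantFP double 1.0 is trivially never NaN, while the quiet NaN
  // constant 0x7FF8000000000000 makes us return false right here.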
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->isNaN();

  if (Depth == MaxDepth)
    return false;

  if (auto *Inst = dyn_cast<Instruction>(V)) {
    switch (Inst->getOpcode()) {
    case Instruction::FAdd:
    case Instruction::FMul:
    case Instruction::FSub:
    case Instruction::FDiv:
    case Instruction::FRem: {
      // TODO: Need isKnownNeverInfinity
      return false;
    }
    case Instruction::Select: {
      return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
             isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
    }
    case Instruction::SIToFP:
    case Instruction::UIToFP:
      return true;
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
    default:
      break;
    }
  }

  if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::canonicalize:
    case Intrinsic::fabs:
    case Intrinsic::copysign:
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::floor:
    case Intrinsic::ceil:
    case Intrinsic::trunc:
    case Intrinsic::rint:
    case Intrinsic::nearbyint:
    case Intrinsic::round:
      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
    case Intrinsic::sqrt:
      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
             CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
    default:
      return false;
    }
  }

  // Bail out for constant expressions, but try to handle vector constants.
  if (!V->getType()->isVectorTy() || !isa<Constant>(V))
    return false;

  // For vectors, verify that each element is not NaN.
  unsigned NumElts = V->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
    if (!Elt)
      return false;
    if (isa<UndefValue>(Elt))
      continue;
    auto *CElt = dyn_cast<ConstantFP>(Elt);
    if (!CElt || CElt->isNaN())
      return false;
  }
  // All elements were confirmed not-NaN or undefined.
  return true;
}

Value *llvm::isBytewiseValue(Value *V) {

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8))
    return V;

  LLVMContext &Ctx = V->getContext();

  // Undef: we don't care which byte value is used.
  auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
  if (isa<UndefValue>(V))
    return UndefInt8;

  Constant *C = dyn_cast<Constant>(V);
  if (!C) {
    // Conceptually, we could handle things like:
    //   %a = zext i8 %X to i16
    //   %b = shl i16 %a, 8
    //   %c = or i16 %a, %b
    // but until there is an example that actually needs this, it doesn't seem
    // worth worrying about.
    return nullptr;
  }

  // Handle 'null' constants such as ConstantAggregateZero.
  if (C->isNullValue())
    return Constant::getNullValue(Type::getInt8Ty(Ctx));

  // Constant floating-point values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
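  // Worked example (illustrative): double 0.0 bitcasts to i64 0, whose bytes
  // are all zero, so the splat byte is 0; double 1.0 bitcasts to
  // 0x3FF0000000000000, whose bytes differ, so it is rejected below.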
3075   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3076     Type *Ty = nullptr;
3077     if (CFP->getType()->isHalfTy())
3078       Ty = Type::getInt16Ty(Ctx);
3079     else if (CFP->getType()->isFloatTy())
3080       Ty = Type::getInt32Ty(Ctx);
3081     else if (CFP->getType()->isDoubleTy())
3082       Ty = Type::getInt64Ty(Ctx);
3083     // Don't handle long double formats, which have strange constraints.
3084     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty)) : nullptr;
3085   }
3086
3087   // We can handle constant integers whose width is a multiple of 8 bits.
3088   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3089     if (CI->getBitWidth() % 8 == 0) {
3090       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3091       if (!CI->getValue().isSplat(8))
3092         return nullptr;
3093       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3094     }
3095   }
3096
3097   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3098     if (LHS == RHS)
3099       return LHS;
3100     if (!LHS || !RHS)
3101       return nullptr;
3102     if (LHS == UndefInt8)
3103       return RHS;
3104     if (RHS == UndefInt8)
3105       return LHS;
3106     return nullptr;
3107   };
3108
3109   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3110     Value *Val = UndefInt8;
3111     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3112       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I)))))
3113         return nullptr;
3114     return Val;
3115   }
3116
3117   if (isa<ConstantVector>(C)) {
3118     Constant *Splat = cast<ConstantVector>(C)->getSplatValue();
3119     return Splat ? isBytewiseValue(Splat) : nullptr;
3120   }
3121
3122   if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
3123     Value *Val = UndefInt8;
3124     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3125       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I)))))
3126         return nullptr;
3127     return Val;
3128   }
3129
3130   // Don't try to handle the handful of other constants.
3131   return nullptr;
3132 }
3133
3134 // This is the recursive version of BuildSubAggregate. Idxs is the sequence of
3135 // indices into the nested struct From that we are currently looking at (the
3136 // type at that position is IndexedType). IdxSkip is the number of indices from
3137 // Idxs that should be left out when inserting into the resulting struct. To is
3138 // the result struct built so far; new insertvalue instructions build on it.
3140 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3141                                 SmallVectorImpl<unsigned> &Idxs,
3142                                 unsigned IdxSkip,
3143                                 Instruction *InsertBefore) {
3144   StructType *STy = dyn_cast<StructType>(IndexedType);
3145   if (STy) {
3146     // Save the original To argument so we can modify it.
3147     Value *OrigTo = To;
3148     // General case, the type indexed by Idxs is a struct.
3149     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3150       // Process each struct element recursively.
3151       Idxs.push_back(i);
3152       Value *PrevTo = To;
3153       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3154                              InsertBefore);
3155       Idxs.pop_back();
3156       if (!To) {
3157         // Couldn't find any inserted value for this index? Clean up.
3158         while (PrevTo != OrigTo) {
3159           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3160           PrevTo = Del->getAggregateOperand();
3161           Del->eraseFromParent();
3162         }
3163         // Stop processing elements.
3164         break;
3165       }
3166     }
3167     // If we successfully found a value for each of our subaggregates.
3168     if (To)
3169       return To;
3170   }
3171   // Base case, the type indexed by Idxs is not a struct, or not all of
3172   // the struct's elements had a value that was inserted directly. In the latter
3173   // case, perhaps we can't determine each of the subelements individually, but
3174   // we might be able to find the complete struct somewhere.
3175
3176   // Find the value that is at that particular spot.
3177   Value *V = FindInsertedValue(From, Idxs);
3178
3179   if (!V)
3180     return nullptr;
3181
3182   // Insert the value in the new (sub) aggregate.
3183   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3184                                  "tmp", InsertBefore);
3185 }
3186
3187 // This helper takes a nested struct and extracts a part of it (which is again a
3188 // struct) into a new value. For example, given the struct:
3189 //   { a, { b, { c, d }, e } }
3190 // and the indices "1, 1" this returns
3191 //   { c, d }.
3192 //
3193 // It does this by inserting an insertvalue for each element in the resulting
3194 // struct, as opposed to just inserting a single struct. This will only work if
3195 // each element of the substruct is known (i.e., inserted into From by an
3196 // insertvalue instruction somewhere).
3197 //
3198 // All inserted insertvalue instructions are inserted before InsertBefore.
3199 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3200                                 Instruction *InsertBefore) {
3201   assert(InsertBefore && "Must have someplace to insert!");
3202   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3203                                                        idx_range);
3204   Value *To = UndefValue::get(IndexedType);
3205   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3206   unsigned IdxSkip = Idxs.size();
3207
3208   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3209 }
3210
3211 /// Given an aggregate and a sequence of indices, see if the scalar value
3212 /// indexed is already around as a register, for example if it was inserted
3213 /// directly into the aggregate.
3214 ///
3215 /// If InsertBefore is not null, this function will duplicate (modified)
3216 /// insertvalues when a part of a nested struct is extracted.
3217 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3218                                Instruction *InsertBefore) {
3219   // Nothing to index? Just return V then (this is useful at the end of our
3220   // recursion).
3221   if (idx_range.empty())
3222     return V;
3223   // We have indices, so V should have an indexable type.
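  // Illustrative example (hypothetical IR): given
  //   %agg = insertvalue { i32, i32 } undef, i32 7, 0
  // FindInsertedValue(%agg, {0}) returns the scalar i32 7 directly, without
  // going through memory.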
3224   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3225          "Not looking at a struct or array?");
3226   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3227          "Invalid indices for type?");
3228
3229   if (Constant *C = dyn_cast<Constant>(V)) {
3230     C = C->getAggregateElement(idx_range[0]);
3231     if (!C) return nullptr;
3232     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3233   }
3234
3235   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3236     // Walk the indices for the insertvalue instruction in parallel with the
3237     // requested indices.
3238     const unsigned *req_idx = idx_range.begin();
3239     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3240          i != e; ++i, ++req_idx) {
3241       if (req_idx == idx_range.end()) {
3242         // We can't handle this without inserting insertvalues.
3243         if (!InsertBefore)
3244           return nullptr;
3245
3246         // The requested index identifies a part of a nested aggregate. Handle
3247         // this specially. For example,
3248         //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3249         //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3250         //   %C = extractvalue {i32, { i32, i32 } } %B, 1
3251         // This can be changed into
3252         //   %A = insertvalue {i32, i32 } undef, i32 10, 0
3253         //   %C = insertvalue {i32, i32 } %A, i32 11, 1
3254         // which allows the unused 0,0 element from the nested struct to be
3255         // removed.
3256         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3257                                  InsertBefore);
3258       }
3259
3260       // This insertvalue inserts something other than what we are looking for.
3261       // See if the (aggregate) value inserted into has the value we are
3262       // looking for, then.
3263       if (*req_idx != *i)
3264         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3265                                  InsertBefore);
3266     }
3267     // If we end up here, the indices of the insertvalue match with those
3268     // requested (though possibly only partially). Now we recursively look at
3269     // the inserted value, passing any remaining indices.
3270     return FindInsertedValue(I->getInsertedValueOperand(),
3271                              makeArrayRef(req_idx, idx_range.end()),
3272                              InsertBefore);
3273   }
3274
3275   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3276     // If we're extracting a value from an aggregate that was extracted from
3277     // something else, we can extract from that something else directly instead.
3278     // However, we will need to chain I's indices with the requested indices.
3279
3280     // Calculate the number of indices required.
3281     unsigned size = I->getNumIndices() + idx_range.size();
3282     // Allocate some space to put the new indices in.
3283     SmallVector<unsigned, 5> Idxs;
3284     Idxs.reserve(size);
3285     // Add indices from the extract value instruction.
3286     Idxs.append(I->idx_begin(), I->idx_end());
3287
3288     // Add requested indices.
3289     Idxs.append(idx_range.begin(), idx_range.end());
3290
3291     assert(Idxs.size() == size
3292            && "Number of indices added not correct?");
3293
3294     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3295   }
3296   // Otherwise, we don't know (e.g., we may be extracting from a function
3297   // return value or a load instruction).
3298   return nullptr;
3299 }
3300
3301 /// Analyze the specified pointer to see if it can be expressed as a base
3302 /// pointer plus a constant offset. Return the base and offset to the caller.
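/// For example (illustrative): given
///   %p = getelementptr i8, i8* %base, i64 4
/// this returns %base and sets Offset to 4.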
3303 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3304                                               const DataLayout &DL) {
3305   unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3306   APInt ByteOffset(BitWidth, 0);
3307
3308   // We walk up the defs but use a visited set to handle unreachable code. In
3309   // that case, we stop after accumulating the cycle once (not that it
3310   // matters).
3311   SmallPtrSet<Value *, 16> Visited;
3312   while (Visited.insert(Ptr).second) {
3313     if (Ptr->getType()->isVectorTy())
3314       break;
3315
3316     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3317       // If one of the values we have visited is an addrspacecast, then
3318       // the pointer type of this GEP may be different from the type
3319       // of the Ptr parameter which was passed to this function. This
3320       // means when we construct GEPOffset, we need to use the size
3321       // of GEP's pointer type rather than the size of the original
3322       // pointer type.
3323       APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3324       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3325         break;
3326
3327       ByteOffset += GEPOffset.getSExtValue();
3328
3329       Ptr = GEP->getPointerOperand();
3330     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3331                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3332       Ptr = cast<Operator>(Ptr)->getOperand(0);
3333     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3334       if (GA->isInterposable())
3335         break;
3336       Ptr = GA->getAliasee();
3337     } else {
3338       break;
3339     }
3340   }
3341   Offset = ByteOffset.getSExtValue();
3342   return Ptr;
3343 }
3344
3345 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3346                                        unsigned CharSize) {
3347   // Make sure the GEP has exactly three arguments.
3348   if (GEP->getNumOperands() != 3)
3349     return false;
3350
3351   // Make sure we are indexing into an array of \p CharSize-bit integers.
3353   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3354   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3355     return false;
3356
3357   // Check to make sure that the first operand of the GEP is an integer and
3358   // has value 0 so that we are sure we're indexing into the initializer.
3359   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3360   if (!FirstIdx || !FirstIdx->isZero())
3361     return false;
3362
3363   return true;
3364 }
3365
3366 bool llvm::getConstantDataArrayInfo(const Value *V,
3367                                     ConstantDataArraySlice &Slice,
3368                                     unsigned ElementSize, uint64_t Offset) {
3369   assert(V);
3370
3371   // Look through bitcast instructions and geps.
3372   V = V->stripPointerCasts();
3373
3374   // If the value is a GEP instruction or constant expression, treat it as an
3375   // offset.
3376   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3377     // The GEP operator should be based on a pointer to a string constant, and
3378     // is indexing into the string constant.
3379     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3380       return false;
3381
3382     // If the second index isn't a ConstantInt, then this is a variable index
3383     // into the array. If this occurs, we can't say anything meaningful about
3384     // the string.
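    // e.g. (illustrative): getelementptr [6 x i8], [6 x i8]* @s, i64 0, i64 %n
    // has a variable second index %n, so we must give up.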
3385     uint64_t StartIdx = 0;
3386     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3387       StartIdx = CI->getZExtValue();
3388     else
3389       return false;
3390     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3391                                     StartIdx + Offset);
3392   }
3393
3394   // The value, after looking through any GEP above, must reference a global
3395   // variable that is a constant and is initialized. The referenced constant
3396   // initializer is the array that we'll use for the optimization.
3397   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3398   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3399     return false;
3400
3401   const ConstantDataArray *Array;
3402   ArrayType *ArrayTy;
3403   if (GV->getInitializer()->isNullValue()) {
3404     Type *GVTy = GV->getValueType();
3405     if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
3406       // A zeroinitializer for the array; there is no ConstantDataArray.
3407       Array = nullptr;
3408     } else {
3409       const DataLayout &DL = GV->getParent()->getDataLayout();
3410       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3411       uint64_t Length = SizeInBytes / (ElementSize / 8);
3412       if (Length <= Offset)
3413         return false;
3414
3415       Slice.Array = nullptr;
3416       Slice.Offset = 0;
3417       Slice.Length = Length - Offset;
3418       return true;
3419     }
3420   } else {
3421     // This must be a ConstantDataArray.
3422     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3423     if (!Array)
3424       return false;
3425     ArrayTy = Array->getType();
3426   }
3427   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3428     return false;
3429
3430   uint64_t NumElts = ArrayTy->getArrayNumElements();
3431   if (Offset > NumElts)
3432     return false;
3433
3434   Slice.Array = Array;
3435   Slice.Offset = Offset;
3436   Slice.Length = NumElts - Offset;
3437   return true;
3438 }
3439
3440 /// This function extracts the null-terminated C string pointed to by V. If
3441 /// successful, it returns true and stores the string in Str; if unsuccessful,
3442 /// it returns false.
3443 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3444                                  uint64_t Offset, bool TrimAtNul) {
3445   ConstantDataArraySlice Slice;
3446   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3447     return false;
3448
3449   if (Slice.Array == nullptr) {
3450     if (TrimAtNul) {
3451       Str = StringRef();
3452       return true;
3453     }
3454     if (Slice.Length == 1) {
3455       Str = StringRef("", 1);
3456       return true;
3457     }
3458     // We cannot instantiate a StringRef as we do not have an appropriate string
3459     // of 0s at hand.
3460     return false;
3461   }
3462
3463   // Start out with the entire array in the StringRef.
3464   Str = Slice.Array->getAsString();
3465   // Skip over 'offset' bytes.
3466   Str = Str.substr(Slice.Offset);
3467
3468   if (TrimAtNul) {
3469     // Trim off the \0 and anything after it. If the array is not nul
3470     // terminated, we just return the whole remainder of the string. The client
3471     // may know some other way that the string is length-bound.
3472     Str = Str.substr(0, Str.find('\0'));
3473   }
3474   return true;
3475 }
3476
3477 // These next two are very similar to the above, but also look through PHI
3478 // nodes.
3479 // TODO: See if we can integrate these two together.
3480
3481 /// If we can compute the length of the string pointed to by
3482 /// the specified pointer, return 'len+1'. If we can't, return 0.
3483 static uint64_t GetStringLengthH(const Value *V,
3484                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3485                                  unsigned CharSize) {
3486   // Look through noop bitcast instructions.
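  // e.g. (illustrative): %p = bitcast [5 x i8]* @str to i8* is stripped back
  // to @str before we try to match the pointer below.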
3487   V = V->stripPointerCasts();
3488
3489   // If this is a PHI node, there are two cases: either we have already seen it
3490   // or we haven't.
3491   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3492     if (!PHIs.insert(PN).second)
3493       return ~0ULL; // already in the set.
3494
3495     // If it was new, see if all the input strings are the same length.
3496     uint64_t LenSoFar = ~0ULL;
3497     for (Value *IncValue : PN->incoming_values()) {
3498       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3499       if (Len == 0) return 0; // Unknown length -> unknown.
3500
3501       if (Len == ~0ULL) continue;
3502
3503       if (Len != LenSoFar && LenSoFar != ~0ULL)
3504         return 0; // Disagree -> unknown.
3505       LenSoFar = Len;
3506     }
3507
3508     // Success, all agree.
3509     return LenSoFar;
3510   }
3511
3512   // strlen(select(c,x,y)) -> strlen(x), provided it agrees with strlen(y).
3513   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3514     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3515     if (Len1 == 0) return 0;
3516     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3517     if (Len2 == 0) return 0;
3518     if (Len1 == ~0ULL) return Len2;
3519     if (Len2 == ~0ULL) return Len1;
3520     if (Len1 != Len2) return 0;
3521     return Len1;
3522   }
3523
3524   // Otherwise, see if we can read the string.
3525   ConstantDataArraySlice Slice;
3526   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3527     return 0;
3528
3529   if (Slice.Array == nullptr)
3530     return 1;
3531
3532   // Search for nul characters.
3533   unsigned NullIndex = 0;
3534   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3535     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3536       break;
3537   }
3538
3539   return NullIndex + 1;
3540 }
3541
3542 /// If we can compute the length of the string pointed to by
3543 /// the specified pointer, return 'len+1'. If we can't, return 0.
3544 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3545   if (!V->getType()->isPointerTy())
3546     return 0;
3547
3548   SmallPtrSet<const PHINode*, 32> PHIs;
3549   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3550   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so just
3551   // return 1, the length of an empty string.
3552   return Len == ~0ULL ? 1 : Len;
3553 }
3554
3555 const Value *llvm::getArgumentAliasingToReturnedPointer(ImmutableCallSite CS) {
3556   assert(CS &&
3557          "getArgumentAliasingToReturnedPointer only works on nonnull CallSite");
3558   if (const Value *RV = CS.getReturnedArgOperand())
3559     return RV;
3560   // This can be used only as an aliasing property.
3561   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS))
3562     return CS.getArgOperand(0);
3563   return nullptr;
3564 }
3565
3566 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3567     ImmutableCallSite CS) {
3568   return CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
3569          CS.getIntrinsicID() == Intrinsic::strip_invariant_group;
3570 }
3571
3572 /// \p PN defines a loop-variant pointer to an object. Check if the
3573 /// previous iteration of the loop was referring to the same object as \p PN.
3574 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3575                                          const LoopInfo *LI) {
3576   // Find the loop-defined value.
3577   Loop *L = LI->getLoopFor(PN->getParent());
3578   if (PN->getNumIncomingValues() != 2)
3579     return true;
3580
3581   // Find the value from the previous iteration.
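  // (That is, the incoming value whose defining instruction lives inside L;
  // the other incoming value is the initial pointer from outside the loop.)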
3582   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3583   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3584     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3585   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3586     return true;
3587
3588   // If a new pointer is loaded in the loop, the pointer references a different
3589   // object in every iteration. E.g.:
3590   //   for (i)
3591   //     int *p = a[i];
3592   //     ...
3593   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3594     if (!L->isLoopInvariant(Load->getPointerOperand()))
3595       return false;
3596   return true;
3597 }
3598
3599 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3600                                  unsigned MaxLookup) {
3601   if (!V->getType()->isPointerTy())
3602     return V;
3603   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3604     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3605       V = GEP->getPointerOperand();
3606     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3607                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3608       V = cast<Operator>(V)->getOperand(0);
3609     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3610       if (GA->isInterposable())
3611         return V;
3612       V = GA->getAliasee();
3613     } else if (isa<AllocaInst>(V)) {
3614       // An alloca can't be further simplified.
3615       return V;
3616     } else {
3617       if (auto CS = CallSite(V)) {
3618         // CaptureTracking knows about special capturing properties of some
3619         // intrinsics, such as launder.invariant.group, that cannot be
3620         // expressed with attributes but still return a pointer that aliases
3621         // their argument. Because an analysis may assume that a nocapture
3622         // pointer is not returned from a special intrinsic (the function
3623         // would otherwise have to be marked with the 'returned' attribute),
3624         // it is crucial to use this helper so that we stay in sync with
3625         // CaptureTracking. Not using it may cause weird miscompilations
3626         // where two aliasing pointers are assumed not to alias.
3627         if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
3628           V = RP;
3629           continue;
3630         }
3631       }
3632
3633       // See if InstructionSimplify knows any relevant tricks.
3634       if (Instruction *I = dyn_cast<Instruction>(V))
3635         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3636         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3637           V = Simplified;
3638           continue;
3639         }
3640
3641       return V;
3642     }
3643     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3644   }
3645   return V;
3646 }
3647
3648 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3649                                 const DataLayout &DL, LoopInfo *LI,
3650                                 unsigned MaxLookup) {
3651   SmallPtrSet<Value *, 4> Visited;
3652   SmallVector<Value *, 4> Worklist;
3653   Worklist.push_back(V);
3654   do {
3655     Value *P = Worklist.pop_back_val();
3656     P = GetUnderlyingObject(P, DL, MaxLookup);
3657
3658     if (!Visited.insert(P).second)
3659       continue;
3660
3661     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3662       Worklist.push_back(SI->getTrueValue());
3663       Worklist.push_back(SI->getFalseValue());
3664       continue;
3665     }
3666
3667     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3668       // If this PHI changes the underlying object in every iteration of the
3669       // loop, don't look through it. Consider:
3670       //   int **A;
3671       //   for (i) {
3672       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3673       //     Curr = A[i];
3674       //     *Prev, *Curr;
3675       //   }
3676       // Prev is tracking Curr one iteration behind so they refer to different
3677       // underlying objects.
3678       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3679           isSameUnderlyingObjectInLoop(PN, LI))
3680         for (Value *IncValue : PN->incoming_values())
3681           Worklist.push_back(IncValue);
3682       continue;
3683     }
3684
3685     Objects.push_back(P);
3686   } while (!Worklist.empty());
3687 }
3688
3689 /// This is the function that does the work of looking through basic
3690 /// ptrtoint+arithmetic+inttoptr sequences.
3691 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3692   do {
3693     if (const Operator *U = dyn_cast<Operator>(V)) {
3694       // If we find a ptrtoint, we can transfer control back to the
3695       // regular getUnderlyingObjectFromInt.
3696       if (U->getOpcode() == Instruction::PtrToInt)
3697         return U->getOperand(0);
3698       // If we find an add of a constant, a multiplied value, or a phi, it's
3699       // likely that the other operand will lead us to the base
3700       // object. We don't have to worry about the case where the
3701       // object address is somehow being computed by the multiply,
3702       // because our callers only care when the result is an
3703       // identifiable object.
3704       if (U->getOpcode() != Instruction::Add ||
3705           (!isa<ConstantInt>(U->getOperand(1)) &&
3706            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3707            !isa<PHINode>(U->getOperand(1))))
3708         return V;
3709       V = U->getOperand(0);
3710     } else {
3711       return V;
3712     }
3713     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3714   } while (true);
3715 }
3716
3717 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3718 /// ptrtoint+arithmetic+inttoptr sequences.
3719 /// It returns false if an unidentified object is found by GetUnderlyingObjects.
3720 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3721                                           SmallVectorImpl<Value *> &Objects,
3722                                           const DataLayout &DL) {
3723   SmallPtrSet<const Value *, 16> Visited;
3724   SmallVector<const Value *, 4> Working(1, V);
3725   do {
3726     V = Working.pop_back_val();
3727
3728     SmallVector<Value *, 4> Objs;
3729     GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3730
3731     for (Value *V : Objs) {
3732       if (!Visited.insert(V).second)
3733         continue;
3734       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3735         const Value *O =
3736             getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3737         if (O->getType()->isPointerTy()) {
3738           Working.push_back(O);
3739           continue;
3740         }
3741       }
3742       // If GetUnderlyingObjects fails to find an identifiable object,
3743       // getUnderlyingObjectsForCodeGen also fails for safety.
3744       if (!isIdentifiedObject(V)) {
3745         Objects.clear();
3746         return false;
3747       }
3748       Objects.push_back(const_cast<Value *>(V));
3749     }
3750   } while (!Working.empty());
3751   return true;
3752 }
3753
3754 /// Return true if the only users of this pointer are lifetime markers.
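/// e.g., an alloca whose only uses are llvm.lifetime.start/llvm.lifetime.end
/// calls qualifies.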
3755 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3756   for (const User *U : V->users()) {
3757     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3758     if (!II) return false;
3759
3760     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3761         II->getIntrinsicID() != Intrinsic::lifetime_end)
3762       return false;
3763   }
3764   return true;
3765 }
3766
3767 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3768                                         const Instruction *CtxI,
3769                                         const DominatorTree *DT) {
3770   const Operator *Inst = dyn_cast<Operator>(V);
3771   if (!Inst)
3772     return false;
3773
3774   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3775     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3776       if (C->canTrap())
3777         return false;
3778
3779   switch (Inst->getOpcode()) {
3780   default:
3781     return true;
3782   case Instruction::UDiv:
3783   case Instruction::URem: {
3784     // x / y is undefined if y == 0.
3785     const APInt *V;
3786     if (match(Inst->getOperand(1), m_APInt(V)))
3787       return *V != 0;
3788     return false;
3789   }
3790   case Instruction::SDiv:
3791   case Instruction::SRem: {
3792     // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
3793     const APInt *Numerator, *Denominator;
3794     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3795       return false;
3796     // We cannot hoist this division if the denominator is 0.
3797     if (*Denominator == 0)
3798       return false;
3799     // It's safe to hoist if the denominator is neither 0 nor -1.
3800     if (*Denominator != -1)
3801       return true;
3802     // At this point we know that the denominator is -1. It is safe to hoist as
3803     // long as we know that the numerator is not INT_MIN.
3804     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3805       return !Numerator->isMinSignedValue();
3806     // The numerator *might* be MinSignedValue.
3807     return false;
3808   }
3809   case Instruction::Load: {
3810     const LoadInst *LI = cast<LoadInst>(Inst);
3811     if (!LI->isUnordered() ||
3812         // Speculative load may create a race that did not exist in the source.
3813         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3814         // Speculative load may load data from dirty regions.
3815         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3816         LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3817       return false;
3818     const DataLayout &DL = LI->getModule()->getDataLayout();
3819     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3820                                               LI->getAlignment(), DL, CtxI, DT);
3821   }
3822   case Instruction::Call: {
3823     auto *CI = cast<const CallInst>(Inst);
3824     const Function *Callee = CI->getCalledFunction();
3825
3826     // The called function could have undefined behavior or side-effects, even
3827     // if marked readnone nounwind.
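    // Only the speculatable attribute is a strong enough guarantee here: e.g.,
    // a readnone nounwind function may still divide by zero.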
3828 return Callee && Callee->isSpeculatable(); 3829 } 3830 case Instruction::VAArg: 3831 case Instruction::Alloca: 3832 case Instruction::Invoke: 3833 case Instruction::PHI: 3834 case Instruction::Store: 3835 case Instruction::Ret: 3836 case Instruction::Br: 3837 case Instruction::IndirectBr: 3838 case Instruction::Switch: 3839 case Instruction::Unreachable: 3840 case Instruction::Fence: 3841 case Instruction::AtomicRMW: 3842 case Instruction::AtomicCmpXchg: 3843 case Instruction::LandingPad: 3844 case Instruction::Resume: 3845 case Instruction::CatchSwitch: 3846 case Instruction::CatchPad: 3847 case Instruction::CatchRet: 3848 case Instruction::CleanupPad: 3849 case Instruction::CleanupRet: 3850 return false; // Misc instructions which have effects 3851 } 3852 } 3853 3854 bool llvm::mayBeMemoryDependent(const Instruction &I) { 3855 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 3856 } 3857 3858 OverflowResult llvm::computeOverflowForUnsignedMul( 3859 const Value *LHS, const Value *RHS, const DataLayout &DL, 3860 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 3861 bool UseInstrInfo) { 3862 // Multiplying n * m significant bits yields a result of n + m significant 3863 // bits. If the total number of significant bits does not exceed the 3864 // result bit width (minus 1), there is no overflow. 3865 // This means if we have enough leading zero bits in the operands 3866 // we can guarantee that the result does not overflow. 3867 // Ref: "Hacker's Delight" by Henry Warren 3868 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3869 KnownBits LHSKnown(BitWidth); 3870 KnownBits RHSKnown(BitWidth); 3871 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr, 3872 UseInstrInfo); 3873 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr, 3874 UseInstrInfo); 3875 // Note that underestimating the number of zero bits gives a more 3876 // conservative answer. 3877 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() + 3878 RHSKnown.countMinLeadingZeros(); 3879 // First handle the easy case: if we have enough zero bits there's 3880 // definitely no overflow. 3881 if (ZeroBits >= BitWidth) 3882 return OverflowResult::NeverOverflows; 3883 3884 // Get the largest possible values for each operand. 3885 APInt LHSMax = ~LHSKnown.Zero; 3886 APInt RHSMax = ~RHSKnown.Zero; 3887 3888 // We know the multiply operation doesn't overflow if the maximum values for 3889 // each operand will not overflow after we multiply them together. 3890 bool MaxOverflow; 3891 (void)LHSMax.umul_ov(RHSMax, MaxOverflow); 3892 if (!MaxOverflow) 3893 return OverflowResult::NeverOverflows; 3894 3895 // We know it always overflows if multiplying the smallest possible values for 3896 // the operands also results in overflow. 3897 bool MinOverflow; 3898 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow); 3899 if (MinOverflow) 3900 return OverflowResult::AlwaysOverflows; 3901 3902 return OverflowResult::MayOverflow; 3903 } 3904 3905 OverflowResult 3906 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS, 3907 const DataLayout &DL, AssumptionCache *AC, 3908 const Instruction *CxtI, 3909 const DominatorTree *DT, bool UseInstrInfo) { 3910 // Multiplying n * m significant bits yields a result of n + m significant 3911 // bits. If the total number of significant bits does not exceed the 3912 // result bit width (minus 1), there is no overflow. 
3913   // This means if we have enough leading sign bits in the operands
3914   // we can guarantee that the result does not overflow.
3915   // Ref: "Hacker's Delight" by Henry Warren
3916   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3917
3918   // Note that underestimating the number of sign bits gives a more
3919   // conservative answer.
3920   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
3921                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
3922
3923   // First handle the easy case: if we have enough sign bits there's
3924   // definitely no overflow.
3925   if (SignBits > BitWidth + 1)
3926     return OverflowResult::NeverOverflows;
3927
3928   // There are two ambiguous cases where there can be no overflow:
3929   //   SignBits == BitWidth + 1    and
3930   //   SignBits == BitWidth
3931   // The second case is difficult to check, therefore we only handle the
3932   // first case.
3933   if (SignBits == BitWidth + 1) {
3934     // It overflows only when both arguments are negative and the true
3935     // product is exactly the minimum negative number.
3936     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
3937     // For simplicity we just check if at least one side is not negative.
3938     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
3939                                           nullptr, UseInstrInfo);
3940     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
3941                                           nullptr, UseInstrInfo);
3942     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
3943       return OverflowResult::NeverOverflows;
3944   }
3945   return OverflowResult::MayOverflow;
3946 }
3947
3948 OverflowResult llvm::computeOverflowForUnsignedAdd(
3949     const Value *LHS, const Value *RHS, const DataLayout &DL,
3950     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3951     bool UseInstrInfo) {
3952   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
3953                                         nullptr, UseInstrInfo);
3954   if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3955     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
3956                                           nullptr, UseInstrInfo);
3957
3958     if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3959       // The sign bit is set in both cases: this MUST overflow.
3961       return OverflowResult::AlwaysOverflows;
3962     }
3963
3964     if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3965       // The sign bit is clear in both cases: this CANNOT overflow.
3967       return OverflowResult::NeverOverflows;
3968     }
3969   }
3970
3971   return OverflowResult::MayOverflow;
3972 }
3973
3974 /// Return true if we can prove that adding the two values of the
3975 /// knownbits will not overflow.
3976 /// Otherwise return false.
3977 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3978                                     const KnownBits &RHSKnown) {
3979   // Addition of two 2's complement numbers having opposite signs will never
3980   // overflow.
3981   if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3982       (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3983     return true;
3984
3985   // If either of the values is known to be non-negative, adding them can only
3986   // overflow if the second is also non-negative, so we can assume that.
3987   // Two non-negative numbers will only overflow if there is a carry to the
3988   // sign bit, so we can check if even when the values are as big as possible
3989   // there is no overflow to the sign bit.
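  // Worked example (illustrative, i8): if both operands have their top two
  // bits known zero, each maximum is 0x3f, and 0x3f + 0x3f = 0x7e leaves the
  // sign bit clear, so the addition cannot overflow.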
3990   if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3991     APInt MaxLHS = ~LHSKnown.Zero;
3992     MaxLHS.clearSignBit();
3993     APInt MaxRHS = ~RHSKnown.Zero;
3994     MaxRHS.clearSignBit();
3995     APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3996     return Result.isSignBitClear();
3997   }
3998
3999   // If either of the values is known to be negative, adding them can only
4000   // overflow if the second is also negative, so we can assume that.
4001   // Two negative numbers will only overflow if there is no carry to the sign
4002   // bit, so we can check if even when the values are as small as possible
4003   // there is overflow to the sign bit.
4004   if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
4005     APInt MinLHS = LHSKnown.One;
4006     MinLHS.clearSignBit();
4007     APInt MinRHS = RHSKnown.One;
4008     MinRHS.clearSignBit();
4009     APInt Result = std::move(MinLHS) + std::move(MinRHS);
4010     return Result.isSignBitSet();
4011   }
4012
4013   // If we reached here it means that we know nothing about the sign bits.
4014   // In this case we can't know if there will be an overflow, since by
4015   // changing the sign bits any two values can be made to overflow.
4016   return false;
4017 }
4018
4019 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4020                                                   const Value *RHS,
4021                                                   const AddOperator *Add,
4022                                                   const DataLayout &DL,
4023                                                   AssumptionCache *AC,
4024                                                   const Instruction *CxtI,
4025                                                   const DominatorTree *DT) {
4026   if (Add && Add->hasNoSignedWrap()) {
4027     return OverflowResult::NeverOverflows;
4028   }
4029
4030   // If LHS and RHS each have at least two sign bits, the addition will look
4031   // like
4032   //
4033   //   XX..... +
4034   //   YY.....
4035   //
4036   // If the carry into the most significant position is 0, X and Y can't both
4037   // be 1 and therefore the carry out of the addition is also 0.
4038   //
4039   // If the carry into the most significant position is 1, X and Y can't both
4040   // be 0 and therefore the carry out of the addition is also 1.
4041   //
4042   // Since the carry into the most significant position is always equal to
4043   // the carry out of the addition, there is no signed overflow.
4044   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4045       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4046     return OverflowResult::NeverOverflows;
4047
4048   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
4049   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
4050
4051   if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
4052     return OverflowResult::NeverOverflows;
4053
4054   // The remaining code needs Add to be available. Return early if it is not.
4055   if (!Add)
4056     return OverflowResult::MayOverflow;
4057
4058   // If the sign of Add is the same as at least one of the operands, this add
4059   // CANNOT overflow. This is particularly useful when the sum is
4060   // @llvm.assume'ed non-negative rather than proved so from analyzing its
4061   // operands.
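  // e.g., if LHS is known non-negative, the only possible wrap is positive
  // overflow, which would force the sign bit of the sum to be set; a sum
  // known (or assumed) non-negative therefore rules it out.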
4062 bool LHSOrRHSKnownNonNegative = 4063 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()); 4064 bool LHSOrRHSKnownNegative = 4065 (LHSKnown.isNegative() || RHSKnown.isNegative()); 4066 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { 4067 KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT); 4068 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) || 4069 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) { 4070 return OverflowResult::NeverOverflows; 4071 } 4072 } 4073 4074 return OverflowResult::MayOverflow; 4075 } 4076 4077 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, 4078 const Value *RHS, 4079 const DataLayout &DL, 4080 AssumptionCache *AC, 4081 const Instruction *CxtI, 4082 const DominatorTree *DT) { 4083 // If the LHS is negative and the RHS is non-negative, no unsigned wrap. 4084 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT); 4085 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); 4086 if (LHSKnown.isNegative() && RHSKnown.isNonNegative()) 4087 return OverflowResult::NeverOverflows; 4088 4089 return OverflowResult::MayOverflow; 4090 } 4091 4092 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, 4093 const Value *RHS, 4094 const DataLayout &DL, 4095 AssumptionCache *AC, 4096 const Instruction *CxtI, 4097 const DominatorTree *DT) { 4098 // If LHS and RHS each have at least two sign bits, the subtraction 4099 // cannot overflow. 4100 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && 4101 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) 4102 return OverflowResult::NeverOverflows; 4103 4104 KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT); 4105 4106 KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT); 4107 4108 // Subtraction of two 2's complement numbers having identical signs will 4109 // never overflow. 4110 if ((LHSKnown.isNegative() && RHSKnown.isNegative()) || 4111 (LHSKnown.isNonNegative() && RHSKnown.isNonNegative())) 4112 return OverflowResult::NeverOverflows; 4113 4114 // TODO: implement logic similar to checkRippleForAdd 4115 return OverflowResult::MayOverflow; 4116 } 4117 4118 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II, 4119 const DominatorTree &DT) { 4120 #ifndef NDEBUG 4121 auto IID = II->getIntrinsicID(); 4122 assert((IID == Intrinsic::sadd_with_overflow || 4123 IID == Intrinsic::uadd_with_overflow || 4124 IID == Intrinsic::ssub_with_overflow || 4125 IID == Intrinsic::usub_with_overflow || 4126 IID == Intrinsic::smul_with_overflow || 4127 IID == Intrinsic::umul_with_overflow) && 4128 "Not an overflow intrinsic!"); 4129 #endif 4130 4131 SmallVector<const BranchInst *, 2> GuardingBranches; 4132 SmallVector<const ExtractValueInst *, 2> Results; 4133 4134 for (const User *U : II->users()) { 4135 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) { 4136 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type"); 4137 4138 if (EVI->getIndices()[0] == 0) 4139 Results.push_back(EVI); 4140 else { 4141 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type"); 4142 4143 for (const auto *U : EVI->users()) 4144 if (const auto *B = dyn_cast<BranchInst>(U)) { 4145 assert(B->isConditional() && "How else is it using an i1?"); 4146 GuardingBranches.push_back(B); 4147 } 4148 } 4149 } else { 4150 // We are using the aggregate directly in a way we don't want to analyze 4151 // here (storing it to a global, say). 
4152       return false;
4153     }
4154   }
4155
4156   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4157     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4158     if (!NoWrapEdge.isSingleEdge())
4159       return false;
4160
4161     // Check if all users of the add are provably no-wrap.
4162     for (const auto *Result : Results) {
4163       // If the extractvalue itself is not executed on overflow, then we don't
4164       // need to check each use separately, since domination is transitive.
4165       if (DT.dominates(NoWrapEdge, Result->getParent()))
4166         continue;
4167
4168       for (auto &RU : Result->uses())
4169         if (!DT.dominates(NoWrapEdge, RU))
4170           return false;
4171     }
4172
4173     return true;
4174   };
4175
4176   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4177 }
4178
4179
4180 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4181                                                  const DataLayout &DL,
4182                                                  AssumptionCache *AC,
4183                                                  const Instruction *CxtI,
4184                                                  const DominatorTree *DT) {
4185   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4186                                        Add, DL, AC, CxtI, DT);
4187 }
4188
4189 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4190                                                  const Value *RHS,
4191                                                  const DataLayout &DL,
4192                                                  AssumptionCache *AC,
4193                                                  const Instruction *CxtI,
4194                                                  const DominatorTree *DT) {
4195   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4196 }
4197
4198 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4199   // A memory operation returns normally if it isn't volatile. A volatile
4200   // operation is allowed to trap.
4201   //
4202   // An atomic operation isn't guaranteed to return in a reasonable amount of
4203   // time because it's possible for another thread to interfere with it for an
4204   // arbitrary length of time, but programs aren't allowed to rely on that.
4205   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4206     return !LI->isVolatile();
4207   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4208     return !SI->isVolatile();
4209   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4210     return !CXI->isVolatile();
4211   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4212     return !RMWI->isVolatile();
4213   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4214     return !MII->isVolatile();
4215
4216   // If there is no successor, then execution can't transfer to it.
4217   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4218     return !CRI->unwindsToCaller();
4219   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4220     return !CatchSwitch->unwindsToCaller();
4221   if (isa<ResumeInst>(I))
4222     return false;
4223   if (isa<ReturnInst>(I))
4224     return false;
4225   if (isa<UnreachableInst>(I))
4226     return false;
4227
4228   // Calls can throw, or contain an infinite loop, or kill the process.
4229   if (auto CS = ImmutableCallSite(I)) {
4230     // Call sites that throw have implicit non-local control flow.
4231     if (!CS.doesNotThrow())
4232       return false;
4233
4234     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4235     // etc. and thus not return. However, LLVM already assumes that
4236     //
4237     //  - Thread exiting actions are modeled as writes to memory invisible to
4238     //    the program.
4239     //
4240     //  - Loops that don't have side effects (side effects are volatile/atomic
4241     //    stores and IO) always terminate (see http://llvm.org/PR965).
4242     //    Furthermore IO itself is also modeled as writes to memory invisible to
4243     //    the program.
4244     //
4245     // We rely on those assumptions here, and use the memory effects of the call
4246     // target as a proxy for checking that it always returns.
4247
4248     // FIXME: This isn't aggressive enough; a call which only writes to a global
4249     // is guaranteed to return.
4250     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
4251            match(I, m_Intrinsic<Intrinsic::assume>()) ||
4252            match(I, m_Intrinsic<Intrinsic::sideeffect>());
4253   }
4254
4255   // Other instructions return normally.
4256   return true;
4257 }
4258
4259 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4260   // TODO: This is slightly conservative for invoke instructions, since exiting
4261   // via an exception *is* normal control flow for them.
4262   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4263     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4264       return false;
4265   return true;
4266 }
4267
4268 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4269                                                   const Loop *L) {
4270   // The loop header is guaranteed to be executed for every iteration.
4271   //
4272   // FIXME: Relax this constraint to cover all basic blocks that are
4273   // guaranteed to be executed at every iteration.
4274   if (I->getParent() != L->getHeader()) return false;
4275
4276   for (const Instruction &LI : *L->getHeader()) {
4277     if (&LI == I) return true;
4278     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4279   }
4280   llvm_unreachable("Instruction not contained in its own parent basic block.");
4281 }
4282
4283 bool llvm::propagatesFullPoison(const Instruction *I) {
4284   switch (I->getOpcode()) {
4285   case Instruction::Add:
4286   case Instruction::Sub:
4287   case Instruction::Xor:
4288   case Instruction::Trunc:
4289   case Instruction::BitCast:
4290   case Instruction::AddrSpaceCast:
4291   case Instruction::Mul:
4292   case Instruction::Shl:
4293   case Instruction::GetElementPtr:
4294     // These operations all propagate poison unconditionally. Note that poison
4295     // is not any particular value, so xor or subtraction of poison with
4296     // itself still yields poison, not zero.
4297     return true;
4298
4299   case Instruction::AShr:
4300   case Instruction::SExt:
4301     // For these operations, one bit of the input is replicated across
4302     // multiple output bits. A replicated poison bit is still poison.
4303     return true;
4304
4305   case Instruction::ICmp:
4306     // Comparing poison with any value yields poison. This is why, for
4307     // instance, x s< (x +nsw 1) can be folded to true.
4308     return true;
4309
4310   default:
4311     return false;
4312   }
4313 }
4314
4315 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4316   switch (I->getOpcode()) {
4317   case Instruction::Store:
4318     return cast<StoreInst>(I)->getPointerOperand();
4319
4320   case Instruction::Load:
4321     return cast<LoadInst>(I)->getPointerOperand();
4322
4323   case Instruction::AtomicCmpXchg:
4324     return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4325
4326   case Instruction::AtomicRMW:
4327     return cast<AtomicRMWInst>(I)->getPointerOperand();
4328
4329   case Instruction::UDiv:
4330   case Instruction::SDiv:
4331   case Instruction::URem:
4332   case Instruction::SRem:
4333     return I->getOperand(1);
4334
4335   default:
4336     return nullptr;
4337   }
4338 }
4339
4340 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4341   // We currently only look for uses of poison values within the same basic
4342   // block, as that makes it easier to guarantee that the uses will be
4343   // executed given that PoisonI is executed.
4344   //
4345   // FIXME: Expand this to consider uses beyond the same basic block. To do
4346   // this, look out for the distinction between post-dominance and strong
4347   // post-dominance.
4348   const BasicBlock *BB = PoisonI->getParent();
4349
4350   // Set of instructions that we have proved will yield poison if PoisonI
4351   // does.
4352   SmallSet<const Value *, 16> YieldsPoison;
4353   SmallSet<const BasicBlock *, 4> Visited;
4354   YieldsPoison.insert(PoisonI);
4355   Visited.insert(PoisonI->getParent());
4356
4357   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4358
4359   unsigned Iter = 0;
4360   while (Iter++ < MaxDepth) {
4361     for (auto &I : make_range(Begin, End)) {
4362       if (&I != PoisonI) {
4363         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4364         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4365           return true;
4366         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4367           return false;
4368       }
4369
4370       // Mark poison that propagates from I through uses of I.
4371       if (YieldsPoison.count(&I)) {
4372         for (const User *User : I.users()) {
4373           const Instruction *UserI = cast<Instruction>(User);
4374           if (propagatesFullPoison(UserI))
4375             YieldsPoison.insert(User);
4376         }
4377       }
4378     }
4379
4380     if (auto *NextBB = BB->getSingleSuccessor()) {
4381       if (Visited.insert(NextBB).second) {
4382         BB = NextBB;
4383         Begin = BB->getFirstNonPHI()->getIterator();
4384         End = BB->end();
4385         continue;
4386       }
4387     }
4388
4389     break;
4390   }
4391   return false;
4392 }
4393
4394 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4395   if (FMF.noNaNs())
4396     return true;
4397
4398   if (auto *C = dyn_cast<ConstantFP>(V))
4399     return !C->isNaN();
4400   return false;
4401 }
4402
4403 static bool isKnownNonZero(const Value *V) {
4404   if (auto *C = dyn_cast<ConstantFP>(V))
4405     return !C->isZero();
4406   return false;
4407 }
4408
4409 /// Match a clamp pattern for float types, without regard for NaNs or signed
4410 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
4411 /// function recognizes whether it can be substituted with a "canonical"
4412 /// min/max pattern.
4413 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4414                                                Value *CmpLHS, Value *CmpRHS,
4415                                                Value *TrueVal, Value *FalseVal,
4416                                                Value *&LHS, Value *&RHS) {
4417   // Try to match
4418   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4419   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4420   // and return description of the outer Max/Min.
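  // Illustrative instance (hypothetical values): with C1 = 1.0 and C2 = 2.0,
  //   (x < 1.0) ? 1.0 : minnum(x, 2.0)
  // is recognized as maxnum(1.0, minnum(x, 2.0)), since 1.0 < 2.0 holds, as
  // checked below.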
4421 4422 // First, check if select has inverse order: 4423 if (CmpRHS == FalseVal) { 4424 std::swap(TrueVal, FalseVal); 4425 Pred = CmpInst::getInversePredicate(Pred); 4426 } 4427 4428 // Assume success now. If there's no match, callers should not use these anyway. 4429 LHS = TrueVal; 4430 RHS = FalseVal; 4431 4432 const APFloat *FC1; 4433 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite()) 4434 return {SPF_UNKNOWN, SPNB_NA, false}; 4435 4436 const APFloat *FC2; 4437 switch (Pred) { 4438 case CmpInst::FCMP_OLT: 4439 case CmpInst::FCMP_OLE: 4440 case CmpInst::FCMP_ULT: 4441 case CmpInst::FCMP_ULE: 4442 if (match(FalseVal, 4443 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)), 4444 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) && 4445 FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan) 4446 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false}; 4447 break; 4448 case CmpInst::FCMP_OGT: 4449 case CmpInst::FCMP_OGE: 4450 case CmpInst::FCMP_UGT: 4451 case CmpInst::FCMP_UGE: 4452 if (match(FalseVal, 4453 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)), 4454 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) && 4455 FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan) 4456 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false}; 4457 break; 4458 default: 4459 break; 4460 } 4461 4462 return {SPF_UNKNOWN, SPNB_NA, false}; 4463 } 4464 4465 /// Recognize variations of: 4466 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v))) 4467 static SelectPatternResult matchClamp(CmpInst::Predicate Pred, 4468 Value *CmpLHS, Value *CmpRHS, 4469 Value *TrueVal, Value *FalseVal) { 4470 // Swap the select operands and predicate to match the patterns below. 4471 if (CmpRHS != TrueVal) { 4472 Pred = ICmpInst::getSwappedPredicate(Pred); 4473 std::swap(TrueVal, FalseVal); 4474 } 4475 const APInt *C1; 4476 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 4477 const APInt *C2; 4478 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 4479 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 4480 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 4481 return {SPF_SMAX, SPNB_NA, false}; 4482 4483 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 4484 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 4485 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 4486 return {SPF_SMIN, SPNB_NA, false}; 4487 4488 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 4489 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 4490 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 4491 return {SPF_UMAX, SPNB_NA, false}; 4492 4493 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 4494 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 4495 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 4496 return {SPF_UMIN, SPNB_NA, false}; 4497 } 4498 return {SPF_UNKNOWN, SPNB_NA, false}; 4499 } 4500 4501 /// Recognize variations of: 4502 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) 4503 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, 4504 Value *CmpLHS, Value *CmpRHS, 4505 Value *TVal, Value *FVal, 4506 unsigned Depth) { 4507 // TODO: Allow FP min/max with nnan/nsz. 
4508 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); 4509 4510 Value *A, *B; 4511 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); 4512 if (!SelectPatternResult::isMinOrMax(L.Flavor)) 4513 return {SPF_UNKNOWN, SPNB_NA, false}; 4514 4515 Value *C, *D; 4516 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); 4517 if (L.Flavor != R.Flavor) 4518 return {SPF_UNKNOWN, SPNB_NA, false}; 4519 4520 // We have something like: x Pred y ? min(a, b) : min(c, d). 4521 // Try to match the compare to the min/max operations of the select operands. 4522 // First, make sure we have the right compare predicate. 4523 switch (L.Flavor) { 4524 case SPF_SMIN: 4525 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { 4526 Pred = ICmpInst::getSwappedPredicate(Pred); 4527 std::swap(CmpLHS, CmpRHS); 4528 } 4529 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 4530 break; 4531 return {SPF_UNKNOWN, SPNB_NA, false}; 4532 case SPF_SMAX: 4533 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { 4534 Pred = ICmpInst::getSwappedPredicate(Pred); 4535 std::swap(CmpLHS, CmpRHS); 4536 } 4537 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 4538 break; 4539 return {SPF_UNKNOWN, SPNB_NA, false}; 4540 case SPF_UMIN: 4541 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { 4542 Pred = ICmpInst::getSwappedPredicate(Pred); 4543 std::swap(CmpLHS, CmpRHS); 4544 } 4545 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 4546 break; 4547 return {SPF_UNKNOWN, SPNB_NA, false}; 4548 case SPF_UMAX: 4549 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { 4550 Pred = ICmpInst::getSwappedPredicate(Pred); 4551 std::swap(CmpLHS, CmpRHS); 4552 } 4553 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) 4554 break; 4555 return {SPF_UNKNOWN, SPNB_NA, false}; 4556 default: 4557 return {SPF_UNKNOWN, SPNB_NA, false}; 4558 } 4559 4560 // If there is a common operand in the already matched min/max and the other 4561 // min/max operands match the compare operands (either directly or inverted), 4562 // then this is min/max of the same flavor. 4563 4564 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4565 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4566 if (D == B) { 4567 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4568 match(A, m_Not(m_Specific(CmpRHS))))) 4569 return {L.Flavor, SPNB_NA, false}; 4570 } 4571 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4572 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4573 if (C == B) { 4574 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 4575 match(A, m_Not(m_Specific(CmpRHS))))) 4576 return {L.Flavor, SPNB_NA, false}; 4577 } 4578 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4579 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4580 if (D == A) { 4581 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4582 match(B, m_Not(m_Specific(CmpRHS))))) 4583 return {L.Flavor, SPNB_NA, false}; 4584 } 4585 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 4586 // ~d pred ~b ? 
  if (C == A) {
    if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS,
                                       unsigned Depth) {
  // Assume success. If there's no match, callers should not use these anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  const APInt *C1;
  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
        C2->isMaxSignedValue())
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
        C2->isMinSignedValue())
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  // Look through 'not' ops to find disguised signed min/max.
  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
      match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
      match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
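
  // None of the known integer min/max idioms matched for this select.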
  return {SPF_UNKNOWN, SPNB_NA, false};
}

bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
  assert(X && Y && "Invalid operand");

  // X = sub (0, Y) || X = sub nsw (0, Y)
  if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
      (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
    return true;

  // Y = sub (0, X) || Y = sub nsw (0, X)
  if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
      (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
    return true;

  // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
  Value *A, *B;
  return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
                       match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
         (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
                      match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
}

static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS,
                                              unsigned Depth) {
  LHS = CmpLHS;
  RHS = CmpRHS;

  // Signed zero may return inconsistent results between implementations.
  //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore, we behave conservatively and only proceed if at least one of
  // the operands is known to not be zero or if we don't care about signed
  // zero.
  switch (Pred) {
  default: break;
  // FIXME: Include OGT/OLT/UGT/ULT.
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //     ordered comparison fails), which could be NaN or non-NaN.
  // So here we discover exactly what NaN behavior is required/accepted.
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
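        // Neither operand is known non-NaN, so we cannot tell which input
        // the select would return when a NaN appears.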
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  if (isKnownNegation(TrueVal, FalseVal)) {
    // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
    // match against either LHS or sext(LHS).
    auto MaybeSExtCmpLHS =
        m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
    auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
    auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
    if (match(TrueVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value
      // (-X >s 0), swap the return values because the negated value is
      // always 'RHS'.
      LHS = TrueVal;
      RHS = FalseVal;
      if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
      // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_ABS, SPNB_NA, false};

      // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
      // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_NABS, SPNB_NA, false};
    } else if (match(FalseVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value
      // (-X >s 0), swap the return values because the negated value is
      // always 'RHS'.
      LHS = FalseVal;
      RHS = TrueVal;
      if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
      // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_NABS, SPNB_NA, false};

      // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
      // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};
    }
  }

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS,
                       Depth);

  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such cases.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}

/// Helps to match a select pattern in case of a type mismatch.
///
/// The function handles the case when the types of the true and false values
/// of a select instruction differ from the type of the cmp instruction's
/// operands because of a cast instruction. The function checks if it is legal
/// to move the cast operation after the "select". If yes, it returns the new
/// second value of the "select" (with the assumption that the cast is moved):
/// 1. As the operand of the cast instruction when both values of the "select"
///    are the same kind of cast from the same source type.
/// 2. As a restored constant (by applying the reverse cast operation) when
///    the first value of the "select" is a cast operation and the second
///    value is a constant.
/// NOTE: We return only the new second value because the first value can be
/// accessed as the operand of the cast instruction.
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move the trunc after the select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // upper bits after truncation. It can't be an abs pattern, because that
      // would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only a min/max pattern can be matched. Such a match requires the
      // widened C to equal CmpConst. That is why we set the widened C to
      // CmpConst here; the condition trunc(CmpConst) == C is checked below.
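      //
      // A concrete sketch (illustrative values only): with
      //   %cond = icmp slt i32 %x, 42
      //   %tr = trunc i32 %x to i8
      //   %narrowsel = select i1 %cond, i8 %tr, i8 42
      // we form select i1 %cond, i32 %x, i32 42 and truncate the result,
      // which is valid because trunc(i32 42) == i8 42.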
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}

CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}
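
// For example (illustrative), getMinMaxPred(SPF_SMAX) returns ICMP_SGT, the
// predicate for which "select (icmp sgt a, b), a, b" is a signed max.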

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}

CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
}

/// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is
/// true when the operands match, but are swapped.
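/// For example (hypothetical values), (icmp slt %a, %b) and
/// (icmp sgt %b, %a) have swapped-but-matching operands.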
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
/// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    const Value *ALHS,
                                                    const Value *ARHS,
                                                    CmpInst::Predicate BPred,
                                                    const Value *BLHS,
                                                    const Value *BRHS,
                                                    bool IsSwappedOps) {
  // Canonicalize the operands so they're matching.
  if (IsSwappedOps) {
    std::swap(BLHS, BRHS);
    BPred = ICmpInst::getSwappedPredicate(BPred);
  }
  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
/// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false. Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const Value *BLHS, const ConstantInt *C2) {
  assert(ALHS == BLHS && "LHS operands must match.");
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS
/// is false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);
  // The rest of the logic assumes the LHS condition is true. If that's not
  // the case, invert the predicate to make it so.
  ICmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  Value *BLHS = RHS->getOperand(0);
  Value *BRHS = RHS->getOperand(1);
  ICmpInst::Predicate BPred = RHS->getPredicate();

  // Can we infer anything when the two compares have matching operands?
  bool IsSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands
  // are constants (not necessarily matching)?
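  // For example (hypothetical values): (%x u< 4) implies (%x u< 5), because
  // the exact range for the first compare, [0, 4), is contained in the
  // allowed range for the second, [0, 5).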
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
            cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS
/// is false. Otherwise, return None if we can't infer anything. We expect
/// the RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or'
  // are false. Similarly, if the result of an 'and' is true, then we know
  // both legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication =
            isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication =
            isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (LHSCmp && RHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to
  // be an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO && RHSCmp) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
  }
  return None;
}
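
// Example usage of isImpliedCondition (an illustrative sketch; DomCond and
// Cond are hypothetical i1 values, not defined in this file):
//
//   if (Optional<bool> Imp = isImpliedCondition(DomCond, Cond, DL)) {
//     // *Imp == true:  Cond is known true whenever DomCond is true.
//     // *Imp == false: Cond is known false whenever DomCond is true.
//   }
//   // None means nothing could be inferred.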