//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}

namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE) {
  return ::computeKnownBits(V, Depth,
                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}
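
// Illustration of the inverted-mask case above (made-up values, not from the
// original source): with i8 operands and M = 0b00001111, LHS = X & ~M can
// only have bits in 0b11110000 and RHS = Y & M only in 0b00001111, so the
// two values can never have a common bit set, whatever X and Y are.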

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0,
                                   const Value *Op1, bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}
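
// A quick illustration of computeForAddSub (made-up i8 values, not from the
// original source): if both operands are known to end in binary 00, no carry
// can be generated below bit 2, so the sum (or difference) is also known to
// end in 00. KnownBits::computeForAddSub performs this kind of carry-aware
// propagation across all bit positions.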

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                                Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //      XX11 (3)
  //      X111 (7)
  // -------------
  //      XX11
  //     XX11
  //    XX11
  //   XX11
  // -------------
  //    XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //  =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand =
      std::min(TrailBitsKnown0 - TrailZero0, TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown =
      Bottom0.getLoBits(TrailBitsKnown0) * Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}
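
// A worked example of the common-prefix logic above (values made up, not from
// the original source): for i8 metadata !range !{i8 64, i8 66} the range is
// [64, 66), so UnsignedMax = 65 and UnsignedMin = 64. 64 ^ 65 = 1 has seven
// leading zeros, so the top seven bits are common to all values in the range:
// Known.One picks up 0b01000000 and Known.Zero picks up 0b10111110, i.e. the
// value is known to look like 0b0100000x.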

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(),
                     [&](const User *U) { return EphValues.count(U); })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end();
         I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(CxtI)),
           IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
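
// For illustration (hypothetical IR, not from this file): in
//   %cmp = icmp sgt i32 %x, 0
//   call void @llvm.assume(i1 %cmp)
//   %y = add i32 %x, 1
// the assume is a valid context for refining the known bits of %x at %y: it
// comes before %y in the same block, and nothing between them can interrupt
// control flow or trap.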

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One  |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One  |= RHSKnown.One  & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One  & MaskKnown.One;
      Known.One  |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One  |= RHSKnown.One  & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One  & BKnown.Zero;
      Known.One  |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
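      // (Illustration with made-up bits, not from the original source: given
      // assume(v ^ b == a), if bit 0 of b is known one and bit 0 of a is
      // known zero, then bit 0 of v must be one; the lines below encode all
      // four such combinations.)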
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One  |= RHSKnown.One  & BKnown.Zero;
      Known.Zero |= RHSKnown.One  & BKnown.One;
      Known.One  |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One  & BKnown.Zero;
      Known.One  |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One  |= RHSKnown.One  & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One  |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One  |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One  |= RHSKnown.One  << C;
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.One  << C;
      Known.One  |= RHSKnown.Zero << C;
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2, unsigned Depth,
    const Query &Q, function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One  = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One  &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
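
// A small example of the enumeration above (made-up i8 values, not from the
// original source): for "shl i8 %x, %s" where %s is known to have all bits
// except bit 0 zero (so %s is 0 or 1) and %x is known to be 3, only shift
// amounts 0 and 1 survive the compatibility checks. The candidate results 3
// (0b00000011) and 6 (0b00000110) are intersected, leaving bit 1 known one
// and bits 7..3 known zero, while bits 0 and 2 stay unknown.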

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add(x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                       m_Value(Y))) ||
         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                       m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
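    // For example (made-up i8 values, not from the original source): if the
    // numerator is known to be at most 63 (two leading zeros) and the
    // denominator is known to be at least 16 (at most three leading zeros),
    // the quotient is at most 63/16 = 3, so LeadZ below becomes
    // min(8, 2 + 8 - 3 - 1) = 6 known leading zero bits.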
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
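  // (Illustration of the smax bound with made-up i8 values, not from the
  // original source: if one side is known to lie in [-32, -1] (three leading
  // ones) and the other in [-64, -1] (two leading ones), the smax is in
  // [-32, -1], so MaxHighOnes = max(3, 2) = 3 leading one bits are known.)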
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has the "nsw" keyword, then the result is either a
      // poison value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
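  // (A quick check of the Shl KZF above with made-up i8 values, not from the
  // original source: if X has its top three bits known zero (KnownZero =
  // 0b11100000), then for ShiftAmt = 2 the result's known-zero mask is
  // (0b11100000 << 2) | 0b00000011 = 0b10000011: the top bit and the two
  // freshly shifted-in low bits are known zero.)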
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 &&
               "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
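    // For example (made-up i8 values, not from the original source): if the
    // LHS is known to be at most 63 (two leading zeros) and the RHS at most
    // 15 (four leading zeros), the remainder is less than 15 as well, so
    // max(2, 4) = 4 leading zeros are known in the result.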
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
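  // (A sketch of the GEP logic above with made-up values, not from the
  // original source: indexing an array of i32 (TypeSize = 4, two trailing
  // zeros) from a base pointer known to be 8-byte aligned (three trailing
  // zeros) with an arbitrary index gives TrailZ = min(3, 2 + 0) = 2, i.e.
  // the resulting pointer is known to be at least 4-byte aligned.)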
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value references the PHI itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
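  // (A recurrence example with hypothetical IR, not from this file: for
  //   %i = phi i8 [ 0, %entry ], [ %i.next, %loop ]
  //   %i.next = add nsw i8 %i, 2
  // the start value 0 and the step 2 both have at least one trailing zero,
  // so %i is known even; and since both are non-negative and the add is nsw,
  // %i is also known non-negative.)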
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      }
    }
    break;
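  // (A quick check of the ctpop bound with made-up i8 values, not from the
  // original source: if at most five bits of the operand can be set, the
  // population count is at most 5, so LowBits = Log2_32(5) + 1 = 3 and all
  // result bits from position 3 upward are known zero.)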
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1),
                              false, Known, Known2, Depth, Q);
          break;
        }
      }
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
          V->getType()->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");

  Type *ScalarTy = V->getType()->getScalarType();
  unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
    Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
  assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
  (void)BitWidth;
  (void)ExpectedWidth;

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector
    // constant!
    Known.One = *C;
    Known.Zero = ~Known.One;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
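  // For example, for <2 x i8> <i8 4, i8 6>, bit 2 is known one, bits 7..3
  // and bit 0 are known zero, and bit 1 is unknown.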
1591 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) { 1592 // We know that CDS must be a vector of integers. Take the intersection of 1593 // each element. 1594 Known.Zero.setAllBits(); Known.One.setAllBits(); 1595 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { 1596 APInt Elt = CDS->getElementAsAPInt(i); 1597 Known.Zero &= ~Elt; 1598 Known.One &= Elt; 1599 } 1600 return; 1601 } 1602 1603 if (const auto *CV = dyn_cast<ConstantVector>(V)) { 1604 // We know that CV must be a vector of integers. Take the intersection of 1605 // each element. 1606 Known.Zero.setAllBits(); Known.One.setAllBits(); 1607 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { 1608 Constant *Element = CV->getAggregateElement(i); 1609 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); 1610 if (!ElementCI) { 1611 Known.resetAll(); 1612 return; 1613 } 1614 const APInt &Elt = ElementCI->getValue(); 1615 Known.Zero &= ~Elt; 1616 Known.One &= Elt; 1617 } 1618 return; 1619 } 1620 1621 // Start out not knowing anything. 1622 Known.resetAll(); 1623 1624 // We can't imply anything about undefs. 1625 if (isa<UndefValue>(V)) 1626 return; 1627 1628 // There's no point in looking through other users of ConstantData for 1629 // assumptions. Confirm that we've handled them all. 1630 assert(!isa<ConstantData>(V) && "Unhandled constant data!"); 1631 1632 // Limit search depth. 1633 // All recursive calls that increase depth must come after this. 1634 if (Depth == MaxDepth) 1635 return; 1636 1637 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has 1638 // the bits of its aliasee. 1639 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 1640 if (!GA->isInterposable()) 1641 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q); 1642 return; 1643 } 1644 1645 if (const Operator *I = dyn_cast<Operator>(V)) 1646 computeKnownBitsFromOperator(I, Known, Depth, Q); 1647 1648 // Aligned pointers have trailing zeros - refine Known.Zero set 1649 if (V->getType()->isPointerTy()) { 1650 unsigned Align = V->getPointerAlignment(Q.DL); 1651 if (Align) 1652 Known.Zero.setLowBits(countTrailingZeros(Align)); 1653 } 1654 1655 // computeKnownBitsFromAssume strictly refines Known. 1656 // Therefore, we run them after computeKnownBitsFromOperator. 1657 1658 // Check whether a nearby assume intrinsic can determine some known bits. 1659 computeKnownBitsFromAssume(V, Known, Depth, Q); 1660 1661 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 1662 } 1663 1664 /// Return true if the given value is known to have exactly one 1665 /// bit set when defined. For vectors return true if every element is known to 1666 /// be a power of two when defined. Supports values with integer or pointer 1667 /// types and vectors of integers. 1668 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, 1669 const Query &Q) { 1670 assert(Depth <= MaxDepth && "Limit Search Depth"); 1671 1672 // Attempt to match against constants. 1673 if (OrZero && match(V, m_Power2OrZero())) 1674 return true; 1675 if (match(V, m_Power2())) 1676 return true; 1677 1678 // 1 << X is clearly a power of two if the one is not shifted off the end. If 1679 // it is shifted off the end then the result is undefined. 1680 if (match(V, m_Shl(m_One(), m_Value()))) 1681 return true; 1682 1683 // (signmask) >>l X is clearly a power of two if the one is not shifted off 1684 // the bottom. If it is shifted off the bottom then the result is undefined. 
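  // For example, for i8 this matches (lshr i8 0x80, X), which produces one
  // of 0x80, 0x40, ..., 0x01 for X in [0, 7].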
1685 if (match(V, m_LShr(m_SignMask(), m_Value()))) 1686 return true; 1687 1688 // The remaining tests are all recursive, so bail out if we hit the limit. 1689 if (Depth++ == MaxDepth) 1690 return false; 1691 1692 Value *X = nullptr, *Y = nullptr; 1693 // A shift left or a logical shift right of a power of two is a power of two 1694 // or zero. 1695 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 1696 match(V, m_LShr(m_Value(X), m_Value())))) 1697 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 1698 1699 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 1700 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 1701 1702 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 1703 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 1704 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 1705 1706 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 1707 // A power of two and'd with anything is a power of two or zero. 1708 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 1709 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 1710 return true; 1711 // X & (-X) is always a power of two or zero. 1712 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 1713 return true; 1714 return false; 1715 } 1716 1717 // Adding a power-of-two or zero to the same power-of-two or zero yields 1718 // either the original power-of-two, a larger power-of-two or zero. 1719 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1720 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 1721 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) { 1722 if (match(X, m_And(m_Specific(Y), m_Value())) || 1723 match(X, m_And(m_Value(), m_Specific(Y)))) 1724 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 1725 return true; 1726 if (match(Y, m_And(m_Specific(X), m_Value())) || 1727 match(Y, m_And(m_Value(), m_Specific(X)))) 1728 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 1729 return true; 1730 1731 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 1732 KnownBits LHSBits(BitWidth); 1733 computeKnownBits(X, LHSBits, Depth, Q); 1734 1735 KnownBits RHSBits(BitWidth); 1736 computeKnownBits(Y, RHSBits, Depth, Q); 1737 // If i8 V is a power of two or zero: 1738 // ZeroBits: 1 1 1 0 1 1 1 1 1739 // ~ZeroBits: 0 0 0 1 0 0 0 0 1740 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) 1741 // If OrZero isn't set, we cannot give back a zero result. 1742 // Make sure either the LHS or RHS has a bit set. 1743 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) 1744 return true; 1745 } 1746 } 1747 1748 // An exact divide or right shift can only shift off zero bits, so the result 1749 // is a power of two only if the first operand is a power of two and not 1750 // copying a sign bit (sdiv int_min, 2). 1751 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 1752 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 1753 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 1754 Depth, Q); 1755 } 1756 1757 return false; 1758 } 1759 1760 /// \brief Test whether a GEP's result is known to be non-null. 1761 /// 1762 /// Uses properties inherent in a GEP to try to determine whether it is known 1763 /// to be non-null. 1764 /// 1765 /// Currently this routine does not support vector GEPs. 
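/// For example, (gep inbounds i32, i32* %p, i64 1) cannot be null in address
/// space zero: walking a non-zero offset from any base cannot legally land
/// on the null address under the inbounds rules.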
1766 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, 1767 const Query &Q) { 1768 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0) 1769 return false; 1770 1771 // FIXME: Support vector-GEPs. 1772 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 1773 1774 // If the base pointer is non-null, we cannot walk to a null address with an 1775 // inbounds GEP in address space zero. 1776 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) 1777 return true; 1778 1779 // Walk the GEP operands and see if any operand introduces a non-zero offset. 1780 // If so, then the GEP cannot produce a null pointer, as doing so would 1781 // inherently violate the inbounds contract within address space zero. 1782 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 1783 GTI != GTE; ++GTI) { 1784 // Struct types are easy -- they must always be indexed by a constant. 1785 if (StructType *STy = GTI.getStructTypeOrNull()) { 1786 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); 1787 unsigned ElementIdx = OpC->getZExtValue(); 1788 const StructLayout *SL = Q.DL.getStructLayout(STy); 1789 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); 1790 if (ElementOffset > 0) 1791 return true; 1792 continue; 1793 } 1794 1795 // If we have a zero-sized type, the index doesn't matter. Keep looping. 1796 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0) 1797 continue; 1798 1799 // Fast path the constant operand case both for efficiency and so we don't 1800 // increment Depth when just zipping down an all-constant GEP. 1801 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { 1802 if (!OpC->isZero()) 1803 return true; 1804 continue; 1805 } 1806 1807 // We post-increment Depth here because while isKnownNonZero increments it 1808 // as well, when we pop back up that increment won't persist. We don't want 1809 // to recurse 10k times just because we have 10k GEP operands. We don't 1810 // bail completely out because we want to handle constant GEPs regardless 1811 // of depth. 1812 if (Depth++ >= MaxDepth) 1813 continue; 1814 1815 if (isKnownNonZero(GTI.getOperand(), Depth, Q)) 1816 return true; 1817 } 1818 1819 return false; 1820 } 1821 1822 static bool isKnownNonNullFromDominatingCondition(const Value *V, 1823 const Instruction *CtxI, 1824 const DominatorTree *DT) { 1825 assert(V->getType()->isPointerTy() && "V must be pointer type"); 1826 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull"); 1827 1828 if (!CtxI || !DT) 1829 return false; 1830 1831 unsigned NumUsesExplored = 0; 1832 for (auto *U : V->users()) { 1833 // Avoid massive lists 1834 if (NumUsesExplored >= DomConditionsMaxUses) 1835 break; 1836 NumUsesExplored++; 1837 1838 // If the value is used as an argument to a call or invoke, then argument 1839 // attributes may provide an answer about null-ness. 
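    // For example, if @f is declared as void @f(i8* nonnull) and a call
    //   call void @f(i8* %p)
    // dominates the context instruction, %p must be non-null there.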
    if (auto CS = ImmutableCallSite(U))
      if (auto *CalledFunc = CS.getCalledFunction())
        for (const Argument &Arg : CalledFunc->args())
          if (CS.getArgOperand(Arg.getArgNo()) == V &&
              Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
            return true;

    // Consider only compare instructions uniquely controlling a branch.
    CmpInst::Predicate Pred;
    if (!match(const_cast<User *>(U),
               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
      continue;

    for (auto *CmpU : U->users()) {
      if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
        assert(BI->isConditional() && "uses a comparison!");

        BasicBlock *NonNullSuccessor =
            BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
        BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
        if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
          return true;
      } else if (Pred == ICmpInst::ICMP_NE &&
                 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
                 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
        return true;
      }
    }
  }

  return false;
}

/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never equal to Value?
static bool rangeMetadataExcludesValue(const MDNode *Ranges,
                                       const APInt &Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;

    // For constant vectors, check that all elements are undefined or known
    // non-zero to determine that the whole vector is known non-zero.
    if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
        Constant *Elt = C->getAggregateElement(i);
        if (!Elt || Elt->isNullValue())
          return false;
        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
          return false;
      }
      return true;
    }

    // A global variable in address space 0 is non-null unless extern weak
    // or an absolute symbol reference. Other address spaces may have null as
    // a valid address for a global, so we can't assume anything.
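    // For example, @g = global i32 0 is known non-null, while
    // @w = extern_weak global i32 may resolve to null at link time.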
1922 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { 1923 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && 1924 GV->getType()->getAddressSpace() == 0) 1925 return true; 1926 } else 1927 return false; 1928 } 1929 1930 if (auto *I = dyn_cast<Instruction>(V)) { 1931 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) { 1932 // If the possible ranges don't contain zero, then the value is 1933 // definitely non-zero. 1934 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { 1935 const APInt ZeroValue(Ty->getBitWidth(), 0); 1936 if (rangeMetadataExcludesValue(Ranges, ZeroValue)) 1937 return true; 1938 } 1939 } 1940 } 1941 1942 // Check for pointer simplifications. 1943 if (V->getType()->isPointerTy()) { 1944 // Alloca never returns null, malloc might. 1945 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0) 1946 return true; 1947 1948 // A byval, inalloca, or nonnull argument is never null. 1949 if (const Argument *A = dyn_cast<Argument>(V)) 1950 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr()) 1951 return true; 1952 1953 // A Load tagged with nonnull metadata is never null. 1954 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 1955 if (LI->getMetadata(LLVMContext::MD_nonnull)) 1956 return true; 1957 1958 if (auto CS = ImmutableCallSite(V)) 1959 if (CS.isReturnNonNull()) 1960 return true; 1961 } 1962 1963 // The remaining tests are all recursive, so bail out if we hit the limit. 1964 if (Depth++ >= MaxDepth) 1965 return false; 1966 1967 // Check for recursive pointer simplifications. 1968 if (V->getType()->isPointerTy()) { 1969 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) 1970 return true; 1971 1972 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 1973 if (isGEPKnownNonNull(GEP, Depth, Q)) 1974 return true; 1975 } 1976 1977 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); 1978 1979 // X | Y != 0 if X != 0 or Y != 0. 1980 Value *X = nullptr, *Y = nullptr; 1981 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 1982 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q); 1983 1984 // ext X != 0 if X != 0. 1985 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 1986 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); 1987 1988 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 1989 // if the lowest bit is shifted off the end. 1990 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) { 1991 // shl nuw can't remove any non-zero bits. 1992 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 1993 if (BO->hasNoUnsignedWrap()) 1994 return isKnownNonZero(X, Depth, Q); 1995 1996 KnownBits Known(BitWidth); 1997 computeKnownBits(X, Known, Depth, Q); 1998 if (Known.One[0]) 1999 return true; 2000 } 2001 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 2002 // defined if the sign bit is shifted off the end. 2003 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 2004 // shr exact can only shift out zero bits. 2005 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 2006 if (BO->isExact()) 2007 return isKnownNonZero(X, Depth, Q); 2008 2009 KnownBits Known = computeKnownBits(X, Depth, Q); 2010 if (Known.isNegative()) 2011 return true; 2012 2013 // If the shifter operand is a constant, and all of the bits shifted 2014 // out are known to be zero, and X is known non-zero then at least one 2015 // non-zero bit must remain. 
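    // For example, if X is known non-zero and its three low bits are known
    // zero, then (lshr X, 3) discards only zero bits, so some non-zero bit
    // must survive the shift.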
2016 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 2017 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 2018 // Is there a known one in the portion not shifted out? 2019 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal) 2020 return true; 2021 // Are all the bits to be shifted out known zero? 2022 if (Known.countMinTrailingZeros() >= ShiftVal) 2023 return isKnownNonZero(X, Depth, Q); 2024 } 2025 } 2026 // div exact can only produce a zero if the dividend is zero. 2027 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 2028 return isKnownNonZero(X, Depth, Q); 2029 } 2030 // X + Y. 2031 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2032 KnownBits XKnown = computeKnownBits(X, Depth, Q); 2033 KnownBits YKnown = computeKnownBits(Y, Depth, Q); 2034 2035 // If X and Y are both non-negative (as signed values) then their sum is not 2036 // zero unless both X and Y are zero. 2037 if (XKnown.isNonNegative() && YKnown.isNonNegative()) 2038 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q)) 2039 return true; 2040 2041 // If X and Y are both negative (as signed values) then their sum is not 2042 // zero unless both X and Y equal INT_MIN. 2043 if (XKnown.isNegative() && YKnown.isNegative()) { 2044 APInt Mask = APInt::getSignedMaxValue(BitWidth); 2045 // The sign bit of X is set. If some other bit is set then X is not equal 2046 // to INT_MIN. 2047 if (XKnown.One.intersects(Mask)) 2048 return true; 2049 // The sign bit of Y is set. If some other bit is set then Y is not equal 2050 // to INT_MIN. 2051 if (YKnown.One.intersects(Mask)) 2052 return true; 2053 } 2054 2055 // The sum of a non-negative number and a power of two is not zero. 2056 if (XKnown.isNonNegative() && 2057 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) 2058 return true; 2059 if (YKnown.isNonNegative() && 2060 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) 2061 return true; 2062 } 2063 // X * Y. 2064 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 2065 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2066 // If X and Y are non-zero then so is X * Y as long as the multiplication 2067 // does not overflow. 2068 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) && 2069 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q)) 2070 return true; 2071 } 2072 // (C ? X : Y) != 0 if X != 0 and Y != 0. 2073 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 2074 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) && 2075 isKnownNonZero(SI->getFalseValue(), Depth, Q)) 2076 return true; 2077 } 2078 // PHI 2079 else if (const PHINode *PN = dyn_cast<PHINode>(V)) { 2080 // Try and detect a recurrence that monotonically increases from a 2081 // starting value, as these are common as induction variables. 2082 if (PN->getNumIncomingValues() == 2) { 2083 Value *Start = PN->getIncomingValue(0); 2084 Value *Induction = PN->getIncomingValue(1); 2085 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start)) 2086 std::swap(Start, Induction); 2087 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) { 2088 if (!C->isZero() && !C->isNegative()) { 2089 ConstantInt *X; 2090 if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) || 2091 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) && 2092 !X->isNegative()) 2093 return true; 2094 } 2095 } 2096 } 2097 // Check if all incoming values are non-zero constant. 
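    // For example, a PHI with incoming values (i32 1, i32 7) is trivially
    // non-zero.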
2098 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) { 2099 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero(); 2100 }); 2101 if (AllNonZeroConstants) 2102 return true; 2103 } 2104 2105 KnownBits Known(BitWidth); 2106 computeKnownBits(V, Known, Depth, Q); 2107 return Known.One != 0; 2108 } 2109 2110 /// Return true if V2 == V1 + X, where X is known non-zero. 2111 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { 2112 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 2113 if (!BO || BO->getOpcode() != Instruction::Add) 2114 return false; 2115 Value *Op = nullptr; 2116 if (V2 == BO->getOperand(0)) 2117 Op = BO->getOperand(1); 2118 else if (V2 == BO->getOperand(1)) 2119 Op = BO->getOperand(0); 2120 else 2121 return false; 2122 return isKnownNonZero(Op, 0, Q); 2123 } 2124 2125 /// Return true if it is known that V1 != V2. 2126 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) { 2127 if (V1 == V2) 2128 return false; 2129 if (V1->getType() != V2->getType()) 2130 // We can't look through casts yet. 2131 return false; 2132 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q)) 2133 return true; 2134 2135 if (V1->getType()->isIntOrIntVectorTy()) { 2136 // Are any known bits in V1 contradictory to known bits in V2? If V1 2137 // has a known zero where V2 has a known one, they must not be equal. 2138 KnownBits Known1 = computeKnownBits(V1, 0, Q); 2139 KnownBits Known2 = computeKnownBits(V2, 0, Q); 2140 2141 if (Known1.Zero.intersects(Known2.One) || 2142 Known2.Zero.intersects(Known1.One)) 2143 return true; 2144 } 2145 return false; 2146 } 2147 2148 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 2149 /// simplify operations downstream. Mask is known to be zero for bits that V 2150 /// cannot have. 2151 /// 2152 /// This function is defined on values with integer type, values with pointer 2153 /// type, and vectors of integers. In the case 2154 /// where V is a vector, the mask, known zero, and known one values are the 2155 /// same width as the vector element, and the bit is set only if it is true 2156 /// for all of the elements in the vector. 2157 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, 2158 const Query &Q) { 2159 KnownBits Known(Mask.getBitWidth()); 2160 computeKnownBits(V, Known, Depth, Q); 2161 return Mask.isSubsetOf(Known.Zero); 2162 } 2163 2164 /// For vector constants, loop over the elements and find the constant with the 2165 /// minimum number of sign bits. Return 0 if the value is not a vector constant 2166 /// or if any element was not analyzed; otherwise, return the count for the 2167 /// element with the minimum number of sign bits. 2168 static unsigned computeNumSignBitsVectorConstant(const Value *V, 2169 unsigned TyBits) { 2170 const auto *CV = dyn_cast<Constant>(V); 2171 if (!CV || !CV->getType()->isVectorTy()) 2172 return 0; 2173 2174 unsigned MinSignBits = TyBits; 2175 unsigned NumElts = CV->getType()->getVectorNumElements(); 2176 for (unsigned i = 0; i != NumElts; ++i) { 2177 // If we find a non-ConstantInt, bail out. 
    auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
    if (!Elt)
      return 0;

    MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
  }

  return MinSignBits;
}

static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
  assert(Result > 0 && "At least one sign bit needs to be present!");
  return Result;
}

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  // We return the minimum number of sign bits that are guaranteed to be
  // present in V, so for undef we have to conservatively return 1. We don't
  // have the same behavior for poison though -- that's a FIXME today.

  Type *ScalarTy = V->getType()->getScalarType();
  unsigned TyBits = ScalarTy->isPointerTy() ?
    Q.DL.getIndexTypeSizeInBits(ScalarTy) :
    Q.DL.getTypeSizeInBits(ScalarTy);

  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general computeKnownBits case
  // below.

  if (Depth == MaxDepth)
    return 1;  // Limit search depth.

  const Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;

  case Instruction::SDiv: {
    const APInt *Denominator;
    // sdiv X, C -> adds log(C) sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits.
      unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Add floor(log(C)) bits to the numerator bits.
      return std::min(TyBits, NumBits + Denominator->logBase2());
    }
    break;
  }

  case Instruction::SRem: {
    const APInt *Denominator;
    // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant. This lets us put a lower bound on the number of
    // sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits. SRem by a positive constant
      // can't lower the number of sign bits.
      unsigned NumrBits =
          ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Calculate the leading sign bit constraints by examining the
      // denominator. Given that the denominator is positive, there are two
      // cases:
      //
      //  1. the numerator is positive.
The result range is [0,C) and [0,C) u< 2272 // (1 << ceilLogBase2(C)). 2273 // 2274 // 2. the numerator is negative. Then the result range is (-C,0] and 2275 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). 2276 // 2277 // Thus a lower bound on the number of sign bits is `TyBits - 2278 // ceilLogBase2(C)`. 2279 2280 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); 2281 return std::max(NumrBits, ResBits); 2282 } 2283 break; 2284 } 2285 2286 case Instruction::AShr: { 2287 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2288 // ashr X, C -> adds C sign bits. Vectors too. 2289 const APInt *ShAmt; 2290 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2291 if (ShAmt->uge(TyBits)) 2292 break; // Bad shift. 2293 unsigned ShAmtLimited = ShAmt->getZExtValue(); 2294 Tmp += ShAmtLimited; 2295 if (Tmp > TyBits) Tmp = TyBits; 2296 } 2297 return Tmp; 2298 } 2299 case Instruction::Shl: { 2300 const APInt *ShAmt; 2301 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2302 // shl destroys sign bits. 2303 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2304 if (ShAmt->uge(TyBits) || // Bad shift. 2305 ShAmt->uge(Tmp)) break; // Shifted all sign bits out. 2306 Tmp2 = ShAmt->getZExtValue(); 2307 return Tmp - Tmp2; 2308 } 2309 break; 2310 } 2311 case Instruction::And: 2312 case Instruction::Or: 2313 case Instruction::Xor: // NOT is handled here. 2314 // Logical binary ops preserve the number of sign bits at the worst. 2315 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2316 if (Tmp != 1) { 2317 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2318 FirstAnswer = std::min(Tmp, Tmp2); 2319 // We computed what we know about the sign bits as our first 2320 // answer. Now proceed to the generic code that uses 2321 // computeKnownBits, and pick whichever answer is better. 2322 } 2323 break; 2324 2325 case Instruction::Select: 2326 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2327 if (Tmp == 1) return 1; // Early out. 2328 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 2329 return std::min(Tmp, Tmp2); 2330 2331 case Instruction::Add: 2332 // Add can have at most one carry bit. Thus we know that the output 2333 // is, at worst, one more bit than the inputs. 2334 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2335 if (Tmp == 1) return 1; // Early out. 2336 2337 // Special case decrementing a value (ADD X, -1): 2338 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 2339 if (CRHS->isAllOnesValue()) { 2340 KnownBits Known(TyBits); 2341 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 2342 2343 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2344 // sign bits set. 2345 if ((Known.Zero | 1).isAllOnesValue()) 2346 return TyBits; 2347 2348 // If we are subtracting one from a positive number, there is no carry 2349 // out of the result. 2350 if (Known.isNonNegative()) 2351 return Tmp; 2352 } 2353 2354 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2355 if (Tmp2 == 1) return 1; 2356 return std::min(Tmp, Tmp2)-1; 2357 2358 case Instruction::Sub: 2359 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2360 if (Tmp2 == 1) return 1; 2361 2362 // Handle NEG. 2363 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2364 if (CLHS->isNullValue()) { 2365 KnownBits Known(TyBits); 2366 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 2367 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2368 // sign bits set. 
2369 if ((Known.Zero | 1).isAllOnesValue()) 2370 return TyBits; 2371 2372 // If the input is known to be positive (the sign bit is known clear), 2373 // the output of the NEG has the same number of sign bits as the input. 2374 if (Known.isNonNegative()) 2375 return Tmp2; 2376 2377 // Otherwise, we treat this like a SUB. 2378 } 2379 2380 // Sub can have at most one carry bit. Thus we know that the output 2381 // is, at worst, one more bit than the inputs. 2382 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2383 if (Tmp == 1) return 1; // Early out. 2384 return std::min(Tmp, Tmp2)-1; 2385 2386 case Instruction::Mul: { 2387 // The output of the Mul can be at most twice the valid bits in the inputs. 2388 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2389 if (SignBitsOp0 == 1) return 1; // Early out. 2390 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2391 if (SignBitsOp1 == 1) return 1; 2392 unsigned OutValidBits = 2393 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); 2394 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; 2395 } 2396 2397 case Instruction::PHI: { 2398 const PHINode *PN = cast<PHINode>(U); 2399 unsigned NumIncomingValues = PN->getNumIncomingValues(); 2400 // Don't analyze large in-degree PHIs. 2401 if (NumIncomingValues > 4) break; 2402 // Unreachable blocks may have zero-operand PHI nodes. 2403 if (NumIncomingValues == 0) break; 2404 2405 // Take the minimum of all incoming values. This can't infinitely loop 2406 // because of our depth threshold. 2407 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q); 2408 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { 2409 if (Tmp == 1) return Tmp; 2410 Tmp = std::min( 2411 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q)); 2412 } 2413 return Tmp; 2414 } 2415 2416 case Instruction::Trunc: 2417 // FIXME: it's tricky to do anything useful for this, but it is an important 2418 // case for targets like X86. 2419 break; 2420 2421 case Instruction::ExtractElement: 2422 // Look through extract element. At the moment we keep this simple and skip 2423 // tracking the specific element. But at least we might find information 2424 // valid for all elements of the vector (for example if vector is sign 2425 // extended, shifted, etc). 2426 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2427 } 2428 2429 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2430 // use this information. 2431 2432 // If we can examine all elements of a vector constant successfully, we're 2433 // done (we can't do any better than that). If not, keep trying. 2434 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits)) 2435 return VecSignBits; 2436 2437 KnownBits Known(TyBits); 2438 computeKnownBits(V, Known, Depth, Q); 2439 2440 // If we know that the sign bit is either zero or one, determine the number of 2441 // identical bits in the top of the input value. 2442 return std::max(FirstAnswer, Known.countMinSignBits()); 2443 } 2444 2445 /// This function computes the integer multiple of Base that equals V. 2446 /// If successful, it returns true and returns the multiple in 2447 /// Multiple. If unsuccessful, it returns false. It looks 2448 /// through SExt instructions only if LookThroughSExt is true. 
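/// For example, with V = i32 24 and Base = 8 this succeeds with Multiple set
/// to i32 3; with V = (mul i32 %x, 8) for a non-constant %x and Base = 8 it
/// succeeds with Multiple set to %x.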
2449 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 2450 bool LookThroughSExt, unsigned Depth) { 2451 const unsigned MaxDepth = 6; 2452 2453 assert(V && "No Value?"); 2454 assert(Depth <= MaxDepth && "Limit Search Depth"); 2455 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); 2456 2457 Type *T = V->getType(); 2458 2459 ConstantInt *CI = dyn_cast<ConstantInt>(V); 2460 2461 if (Base == 0) 2462 return false; 2463 2464 if (Base == 1) { 2465 Multiple = V; 2466 return true; 2467 } 2468 2469 ConstantExpr *CO = dyn_cast<ConstantExpr>(V); 2470 Constant *BaseVal = ConstantInt::get(T, Base); 2471 if (CO && CO == BaseVal) { 2472 // Multiple is 1. 2473 Multiple = ConstantInt::get(T, 1); 2474 return true; 2475 } 2476 2477 if (CI && CI->getZExtValue() % Base == 0) { 2478 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 2479 return true; 2480 } 2481 2482 if (Depth == MaxDepth) return false; // Limit search depth. 2483 2484 Operator *I = dyn_cast<Operator>(V); 2485 if (!I) return false; 2486 2487 switch (I->getOpcode()) { 2488 default: break; 2489 case Instruction::SExt: 2490 if (!LookThroughSExt) return false; 2491 // otherwise fall through to ZExt 2492 LLVM_FALLTHROUGH; 2493 case Instruction::ZExt: 2494 return ComputeMultiple(I->getOperand(0), Base, Multiple, 2495 LookThroughSExt, Depth+1); 2496 case Instruction::Shl: 2497 case Instruction::Mul: { 2498 Value *Op0 = I->getOperand(0); 2499 Value *Op1 = I->getOperand(1); 2500 2501 if (I->getOpcode() == Instruction::Shl) { 2502 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 2503 if (!Op1CI) return false; 2504 // Turn Op0 << Op1 into Op0 * 2^Op1 2505 APInt Op1Int = Op1CI->getValue(); 2506 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 2507 APInt API(Op1Int.getBitWidth(), 0); 2508 API.setBit(BitToSet); 2509 Op1 = ConstantInt::get(V->getContext(), API); 2510 } 2511 2512 Value *Mul0 = nullptr; 2513 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 2514 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 2515 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 2516 if (Op1C->getType()->getPrimitiveSizeInBits() < 2517 MulC->getType()->getPrimitiveSizeInBits()) 2518 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 2519 if (Op1C->getType()->getPrimitiveSizeInBits() > 2520 MulC->getType()->getPrimitiveSizeInBits()) 2521 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 2522 2523 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 2524 Multiple = ConstantExpr::getMul(MulC, Op1C); 2525 return true; 2526 } 2527 2528 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 2529 if (Mul0CI->getValue() == 1) { 2530 // V == Base * Op1, so return Op1 2531 Multiple = Op1; 2532 return true; 2533 } 2534 } 2535 2536 Value *Mul1 = nullptr; 2537 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 2538 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 2539 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 2540 if (Op0C->getType()->getPrimitiveSizeInBits() < 2541 MulC->getType()->getPrimitiveSizeInBits()) 2542 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 2543 if (Op0C->getType()->getPrimitiveSizeInBits() > 2544 MulC->getType()->getPrimitiveSizeInBits()) 2545 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 2546 2547 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 2548 Multiple = ConstantExpr::getMul(MulC, Op0C); 2549 return true; 2550 } 2551 2552 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) 2553 if (Mul1CI->getValue() == 1) { 2554 // V == Base * Op0, so 
return Op0 2555 Multiple = Op0; 2556 return true; 2557 } 2558 } 2559 } 2560 } 2561 2562 // We could not determine if V is a multiple of Base. 2563 return false; 2564 } 2565 2566 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS, 2567 const TargetLibraryInfo *TLI) { 2568 const Function *F = ICS.getCalledFunction(); 2569 if (!F) 2570 return Intrinsic::not_intrinsic; 2571 2572 if (F->isIntrinsic()) 2573 return F->getIntrinsicID(); 2574 2575 if (!TLI) 2576 return Intrinsic::not_intrinsic; 2577 2578 LibFunc Func; 2579 // We're going to make assumptions on the semantics of the functions, check 2580 // that the target knows that it's available in this environment and it does 2581 // not have local linkage. 2582 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func)) 2583 return Intrinsic::not_intrinsic; 2584 2585 if (!ICS.onlyReadsMemory()) 2586 return Intrinsic::not_intrinsic; 2587 2588 // Otherwise check if we have a call to a function that can be turned into a 2589 // vector intrinsic. 2590 switch (Func) { 2591 default: 2592 break; 2593 case LibFunc_sin: 2594 case LibFunc_sinf: 2595 case LibFunc_sinl: 2596 return Intrinsic::sin; 2597 case LibFunc_cos: 2598 case LibFunc_cosf: 2599 case LibFunc_cosl: 2600 return Intrinsic::cos; 2601 case LibFunc_exp: 2602 case LibFunc_expf: 2603 case LibFunc_expl: 2604 return Intrinsic::exp; 2605 case LibFunc_exp2: 2606 case LibFunc_exp2f: 2607 case LibFunc_exp2l: 2608 return Intrinsic::exp2; 2609 case LibFunc_log: 2610 case LibFunc_logf: 2611 case LibFunc_logl: 2612 return Intrinsic::log; 2613 case LibFunc_log10: 2614 case LibFunc_log10f: 2615 case LibFunc_log10l: 2616 return Intrinsic::log10; 2617 case LibFunc_log2: 2618 case LibFunc_log2f: 2619 case LibFunc_log2l: 2620 return Intrinsic::log2; 2621 case LibFunc_fabs: 2622 case LibFunc_fabsf: 2623 case LibFunc_fabsl: 2624 return Intrinsic::fabs; 2625 case LibFunc_fmin: 2626 case LibFunc_fminf: 2627 case LibFunc_fminl: 2628 return Intrinsic::minnum; 2629 case LibFunc_fmax: 2630 case LibFunc_fmaxf: 2631 case LibFunc_fmaxl: 2632 return Intrinsic::maxnum; 2633 case LibFunc_copysign: 2634 case LibFunc_copysignf: 2635 case LibFunc_copysignl: 2636 return Intrinsic::copysign; 2637 case LibFunc_floor: 2638 case LibFunc_floorf: 2639 case LibFunc_floorl: 2640 return Intrinsic::floor; 2641 case LibFunc_ceil: 2642 case LibFunc_ceilf: 2643 case LibFunc_ceill: 2644 return Intrinsic::ceil; 2645 case LibFunc_trunc: 2646 case LibFunc_truncf: 2647 case LibFunc_truncl: 2648 return Intrinsic::trunc; 2649 case LibFunc_rint: 2650 case LibFunc_rintf: 2651 case LibFunc_rintl: 2652 return Intrinsic::rint; 2653 case LibFunc_nearbyint: 2654 case LibFunc_nearbyintf: 2655 case LibFunc_nearbyintl: 2656 return Intrinsic::nearbyint; 2657 case LibFunc_round: 2658 case LibFunc_roundf: 2659 case LibFunc_roundl: 2660 return Intrinsic::round; 2661 case LibFunc_pow: 2662 case LibFunc_powf: 2663 case LibFunc_powl: 2664 return Intrinsic::pow; 2665 case LibFunc_sqrt: 2666 case LibFunc_sqrtf: 2667 case LibFunc_sqrtl: 2668 return Intrinsic::sqrt; 2669 } 2670 2671 return Intrinsic::not_intrinsic; 2672 } 2673 2674 /// Return true if we can prove that the specified FP value is never equal to 2675 /// -0.0. 2676 /// 2677 /// NOTE: this function will need to be revisited when we support non-default 2678 /// rounding modes! 
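/// For example, (fadd %x, 0.0) can never produce -0.0: even when %x is -0.0,
/// the sum rounds to +0.0 under the default rounding mode.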
2679 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, 2680 unsigned Depth) { 2681 if (auto *CFP = dyn_cast<ConstantFP>(V)) 2682 return !CFP->getValueAPF().isNegZero(); 2683 2684 // Limit search depth. 2685 if (Depth == MaxDepth) 2686 return false; 2687 2688 auto *Op = dyn_cast<Operator>(V); 2689 if (!Op) 2690 return false; 2691 2692 // Check if the nsz fast-math flag is set. 2693 if (auto *FPO = dyn_cast<FPMathOperator>(Op)) 2694 if (FPO->hasNoSignedZeros()) 2695 return true; 2696 2697 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. 2698 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP()))) 2699 return true; 2700 2701 // sitofp and uitofp turn into +0.0 for zero. 2702 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) 2703 return true; 2704 2705 if (auto *Call = dyn_cast<CallInst>(Op)) { 2706 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI); 2707 switch (IID) { 2708 default: 2709 break; 2710 // sqrt(-0.0) = -0.0, no other negative results are possible. 2711 case Intrinsic::sqrt: 2712 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); 2713 // fabs(x) != -0.0 2714 case Intrinsic::fabs: 2715 return true; 2716 } 2717 } 2718 2719 return false; 2720 } 2721 2722 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a 2723 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign 2724 /// bit despite comparing equal. 2725 static bool cannotBeOrderedLessThanZeroImpl(const Value *V, 2726 const TargetLibraryInfo *TLI, 2727 bool SignBitOnly, 2728 unsigned Depth) { 2729 // TODO: This function does not do the right thing when SignBitOnly is true 2730 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform 2731 // which flips the sign bits of NaNs. See 2732 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2733 2734 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 2735 return !CFP->getValueAPF().isNegative() || 2736 (!SignBitOnly && CFP->getValueAPF().isZero()); 2737 } 2738 2739 // Handle vector of constants. 2740 if (auto *CV = dyn_cast<Constant>(V)) { 2741 if (CV->getType()->isVectorTy()) { 2742 unsigned NumElts = CV->getType()->getVectorNumElements(); 2743 for (unsigned i = 0; i != NumElts; ++i) { 2744 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i)); 2745 if (!CFP) 2746 return false; 2747 if (CFP->getValueAPF().isNegative() && 2748 (SignBitOnly || !CFP->getValueAPF().isZero())) 2749 return false; 2750 } 2751 2752 // All non-negative ConstantFPs. 2753 return true; 2754 } 2755 } 2756 2757 if (Depth == MaxDepth) 2758 return false; // Limit search depth. 2759 2760 const Operator *I = dyn_cast<Operator>(V); 2761 if (!I) 2762 return false; 2763 2764 switch (I->getOpcode()) { 2765 default: 2766 break; 2767 // Unsigned integers are always nonnegative. 2768 case Instruction::UIToFP: 2769 return true; 2770 case Instruction::FMul: 2771 // x*x is always non-negative or a NaN. 
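    // For example, (fmul %x, %x) is 9.0 for %x == -3.0; only a NaN input can
    // make the result NaN, whose sign bit is unconstrained unless nnan is
    // set.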
2772 if (I->getOperand(0) == I->getOperand(1) && 2773 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 2774 return true; 2775 2776 LLVM_FALLTHROUGH; 2777 case Instruction::FAdd: 2778 case Instruction::FDiv: 2779 case Instruction::FRem: 2780 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2781 Depth + 1) && 2782 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2783 Depth + 1); 2784 case Instruction::Select: 2785 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2786 Depth + 1) && 2787 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2788 Depth + 1); 2789 case Instruction::FPExt: 2790 case Instruction::FPTrunc: 2791 // Widening/narrowing never change sign. 2792 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2793 Depth + 1); 2794 case Instruction::ExtractElement: 2795 // Look through extract element. At the moment we keep this simple and skip 2796 // tracking the specific element. But at least we might find information 2797 // valid for all elements of the vector. 2798 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2799 Depth + 1); 2800 case Instruction::Call: 2801 const auto *CI = cast<CallInst>(I); 2802 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI); 2803 switch (IID) { 2804 default: 2805 break; 2806 case Intrinsic::maxnum: 2807 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2808 Depth + 1) || 2809 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2810 Depth + 1); 2811 case Intrinsic::minnum: 2812 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2813 Depth + 1) && 2814 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2815 Depth + 1); 2816 case Intrinsic::exp: 2817 case Intrinsic::exp2: 2818 case Intrinsic::fabs: 2819 return true; 2820 2821 case Intrinsic::sqrt: 2822 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 2823 if (!SignBitOnly) 2824 return true; 2825 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 2826 CannotBeNegativeZero(CI->getOperand(0), TLI)); 2827 2828 case Intrinsic::powi: 2829 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 2830 // powi(x,n) is non-negative if n is even. 2831 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 2832 return true; 2833 } 2834 // TODO: This is not correct. Given that exp is an integer, here are the 2835 // ways that pow can return a negative value: 2836 // 2837 // pow(x, exp) --> negative if exp is odd and x is negative. 2838 // pow(-0, exp) --> -inf if exp is negative odd. 2839 // pow(-0, exp) --> -0 if exp is positive odd. 2840 // pow(-inf, exp) --> -0 if exp is negative odd. 2841 // pow(-inf, exp) --> -inf if exp is positive odd. 2842 // 2843 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 2844 // but we must return false if x == -0. Unfortunately we do not currently 2845 // have a way of expressing this constraint. See details in 2846 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2847 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2848 Depth + 1); 2849 2850 case Intrinsic::fma: 2851 case Intrinsic::fmuladd: 2852 // x*x+y is non-negative if y is non-negative. 
      return I->getOperand(0) == I->getOperand(1) &&
             (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI,
                                             SignBitOnly, Depth + 1);
    }
    break;
  }
  return false;
}

bool llvm::CannotBeOrderedLessThanZero(const Value *V,
                                       const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
}

bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
}

bool llvm::isKnownNeverNaN(const Value *V) {
  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");

  // If we're told that NaNs won't happen, assume they won't.
  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
    if (FPMathOp->hasNoNaNs())
      return true;

  // TODO: Handle instructions and potentially recurse like other 'isKnown'
  // functions. For example, the result of sitofp is never NaN.

  // Handle scalar constants.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->isNaN();

  // Bail out for constant expressions, but try to handle vector constants.
  if (!V->getType()->isVectorTy() || !isa<Constant>(V))
    return false;

  // For vectors, verify that each element is not NaN.
  unsigned NumElts = V->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
    if (!Elt)
      return false;
    if (isa<UndefValue>(Elt))
      continue;
    auto *CElt = dyn_cast<ConstantFP>(Elt);
    if (!CElt || CElt->isNaN())
      return false;
  }
  // All elements were confirmed not-NaN or undefined.
  return true;
}

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0,
/// double 0.0, etc. If the value can't be handled with a repeated byte store
/// (e.g. i16 0x1234), return null.
Value *llvm::isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Handle 'null' ConstantAggregateZero etc.
  if (Constant *C = dyn_cast<Constant>(V))
    if (C->isNullValue())
      return Constant::getNullValue(Type::getInt8Ty(V->getContext()));

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a multiple of 8 bits in width.
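  // For example, i32 0x01010101 splats to i8 1, and i16 0xF0F0 splats to
  // i8 0xF0, but i16 0x1234 has no repeating byte and yields null.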
2932 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 2933 if (CI->getBitWidth() % 8 == 0) { 2934 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); 2935 2936 if (!CI->getValue().isSplat(8)) 2937 return nullptr; 2938 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8)); 2939 } 2940 } 2941 2942 // A ConstantDataArray/Vector is splatable if all its members are equal and 2943 // also splatable. 2944 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) { 2945 Value *Elt = CA->getElementAsConstant(0); 2946 Value *Val = isBytewiseValue(Elt); 2947 if (!Val) 2948 return nullptr; 2949 2950 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I) 2951 if (CA->getElementAsConstant(I) != Elt) 2952 return nullptr; 2953 2954 return Val; 2955 } 2956 2957 // Conceptually, we could handle things like: 2958 // %a = zext i8 %X to i16 2959 // %b = shl i16 %a, 8 2960 // %c = or i16 %a, %b 2961 // but until there is an example that actually needs this, it doesn't seem 2962 // worth worrying about. 2963 return nullptr; 2964 } 2965 2966 // This is the recursive version of BuildSubAggregate. It takes a few different 2967 // arguments. Idxs is the index within the nested struct From that we are 2968 // looking at now (which is of type IndexedType). IdxSkip is the number of 2969 // indices from Idxs that should be left out when inserting into the resulting 2970 // struct. To is the result struct built so far, new insertvalue instructions 2971 // build on that. 2972 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, 2973 SmallVectorImpl<unsigned> &Idxs, 2974 unsigned IdxSkip, 2975 Instruction *InsertBefore) { 2976 StructType *STy = dyn_cast<StructType>(IndexedType); 2977 if (STy) { 2978 // Save the original To argument so we can modify it 2979 Value *OrigTo = To; 2980 // General case, the type indexed by Idxs is a struct 2981 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2982 // Process each struct element recursively 2983 Idxs.push_back(i); 2984 Value *PrevTo = To; 2985 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, 2986 InsertBefore); 2987 Idxs.pop_back(); 2988 if (!To) { 2989 // Couldn't find any inserted value for this index? Cleanup 2990 while (PrevTo != OrigTo) { 2991 InsertValueInst* Del = cast<InsertValueInst>(PrevTo); 2992 PrevTo = Del->getAggregateOperand(); 2993 Del->eraseFromParent(); 2994 } 2995 // Stop processing elements 2996 break; 2997 } 2998 } 2999 // If we successfully found a value for each of our subaggregates 3000 if (To) 3001 return To; 3002 } 3003 // Base case, the type indexed by SourceIdxs is not a struct, or not all of 3004 // the struct's elements had a value that was inserted directly. In the latter 3005 // case, perhaps we can't determine each of the subelements individually, but 3006 // we might be able to find the complete struct somewhere. 3007 3008 // Find the value that is at that particular spot 3009 Value *V = FindInsertedValue(From, Idxs); 3010 3011 if (!V) 3012 return nullptr; 3013 3014 // Insert the value in the new (sub) aggregate 3015 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), 3016 "tmp", InsertBefore); 3017 } 3018 3019 // This helper takes a nested struct and extracts a part of it (which is again a 3020 // struct) into a new value. For example, given the struct: 3021 // { a, { b, { c, d }, e } } 3022 // and the indices "1, 1" this returns 3023 // { c, d }. 
3024 //
3025 // It does this by inserting an insertvalue for each element in the resulting
3026 // struct, as opposed to just inserting a single struct. This will only work if
3027 // each of the elements of the substruct is known (i.e., inserted into From by
3028 // an insertvalue instruction somewhere).
3029 //
3030 // All inserted insertvalue instructions are inserted before InsertBefore.
3031 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3032                                 Instruction *InsertBefore) {
3033   assert(InsertBefore && "Must have someplace to insert!");
3034   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3035                                                        idx_range);
3036   Value *To = UndefValue::get(IndexedType);
3037   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3038   unsigned IdxSkip = Idxs.size();
3039 
3040   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3041 }
3042 
3043 /// Given an aggregate and a sequence of indices, see if the scalar value
3044 /// indexed is already around as a register, for example if it was inserted
3045 /// directly into the aggregate.
3046 ///
3047 /// If InsertBefore is not null, this function will duplicate (modified)
3048 /// insertvalues when a part of a nested struct is extracted.
3049 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3050                                Instruction *InsertBefore) {
3051   // Nothing to index? Just return V then (this is useful at the end of our
3052   // recursion).
3053   if (idx_range.empty())
3054     return V;
3055   // We have indices, so V should have an indexable type.
3056   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3057          "Not looking at a struct or array?");
3058   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3059          "Invalid indices for type?");
3060 
3061   if (Constant *C = dyn_cast<Constant>(V)) {
3062     C = C->getAggregateElement(idx_range[0]);
3063     if (!C) return nullptr;
3064     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3065   }
3066 
3067   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3068     // Loop over the indices of the insertvalue instruction in parallel with
3069     // the requested indices.
3070     const unsigned *req_idx = idx_range.begin();
3071     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3072          i != e; ++i, ++req_idx) {
3073       if (req_idx == idx_range.end()) {
3074         // We can't handle this without inserting insertvalues.
3075         if (!InsertBefore)
3076           return nullptr;
3077 
3078         // The requested index identifies a part of a nested aggregate. Handle
3079         // this specially. For example,
3080         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3081         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3082         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3083         // This can be changed into
3084         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3085         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3086         // which allows the unused 0,0 element from the nested struct to be
3087         // removed.
3088         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3089                                  InsertBefore);
3090       }
3091 
3092       // This insertvalue inserts something other than what we are looking
3093       // for. See if the (aggregate) value inserted into has the value we are
3094       // looking for, then.
3095       if (*req_idx != *i)
3096         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3097                                  InsertBefore);
3098     }
3099     // If we end up here, the indices of the insertvalue match with those
3100     // requested (though possibly only partially). Now we recursively look at
3101     // the inserted value, passing any remaining indices.
3102     return FindInsertedValue(I->getInsertedValueOperand(),
3103                              makeArrayRef(req_idx, idx_range.end()),
3104                              InsertBefore);
3105   }
3106 
3107   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3108     // If we're extracting a value from an aggregate that was extracted from
3109     // something else, we can extract from that something else directly instead.
3110     // However, we will need to chain I's indices with the requested indices.
3111 
3112     // Calculate the number of indices required
3113     unsigned size = I->getNumIndices() + idx_range.size();
3114     // Allocate some space to put the new indices in
3115     SmallVector<unsigned, 5> Idxs;
3116     Idxs.reserve(size);
3117     // Add indices from the extract value instruction
3118     Idxs.append(I->idx_begin(), I->idx_end());
3119 
3120     // Add requested indices
3121     Idxs.append(idx_range.begin(), idx_range.end());
3122 
3123     assert(Idxs.size() == size
3124            && "Number of indices added not correct?");
3125 
3126     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3127   }
3128   // Otherwise, we don't know (such as when extracting from a function return
3129   // value or a load instruction).
3130   return nullptr;
3131 }
3132 
3133 /// Analyze the specified pointer to see if it can be expressed as a base
3134 /// pointer plus a constant offset. Return the base and offset to the caller.
3135 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3136                                               const DataLayout &DL) {
3137   unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3138   APInt ByteOffset(BitWidth, 0);
3139 
3140   // We walk up the defs but use a visited set to handle unreachable code. In
3141   // that case, we stop after accumulating the cycle once (not that it
3142   // matters).
3143   SmallPtrSet<Value *, 16> Visited;
3144   while (Visited.insert(Ptr).second) {
3145     if (Ptr->getType()->isVectorTy())
3146       break;
3147 
3148     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3149       // If one of the values we have visited is an addrspacecast, then
3150       // the pointer type of this GEP may be different from the type
3151       // of the Ptr parameter which was passed to this function. This
3152       // means when we construct GEPOffset, we need to use the size
3153       // of GEP's pointer type rather than the size of the original
3154       // pointer type.
3155       APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3156       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3157         break;
3158 
3159       ByteOffset += GEPOffset.getSExtValue();
3160 
3161       Ptr = GEP->getPointerOperand();
3162     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3163                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3164       Ptr = cast<Operator>(Ptr)->getOperand(0);
3165     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3166       if (GA->isInterposable())
3167         break;
3168       Ptr = GA->getAliasee();
3169     } else {
3170       break;
3171     }
3172   }
3173   Offset = ByteOffset.getSExtValue();
3174   return Ptr;
3175 }
3176 
3177 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3178                                        unsigned CharSize) {
3179   // Make sure the GEP has exactly three arguments.
3180   if (GEP->getNumOperands() != 3)
3181     return false;
3182 
3183   // Make sure the index-ee is a pointer to an array of \p CharSize integers.
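  // For CharSize == 8 that means, e.g. (illustrative):
  //   getelementptr [N x i8], [N x i8]* @str, i64 0, i64 %idx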
3185   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3186   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3187     return false;
3188 
3189   // Check to make sure that the first operand of the GEP is an integer and
3190   // has value 0 so that we are sure we're indexing into the initializer.
3191   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3192   if (!FirstIdx || !FirstIdx->isZero())
3193     return false;
3194 
3195   return true;
3196 }
3197 
3198 bool llvm::getConstantDataArrayInfo(const Value *V,
3199                                     ConstantDataArraySlice &Slice,
3200                                     unsigned ElementSize, uint64_t Offset) {
3201   assert(V);
3202 
3203   // Look through bitcast instructions and geps.
3204   V = V->stripPointerCasts();
3205 
3206   // If the value is a GEP instruction or constant expression, treat it as an
3207   // offset.
3208   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3209     // The GEP operator should be based on a pointer to a string constant, and
3210     // is indexing into the string constant.
3211     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3212       return false;
3213 
3214     // If the second index isn't a ConstantInt, then this is a variable index
3215     // into the array. If this occurs, we can't say anything meaningful about
3216     // the string.
3217     uint64_t StartIdx = 0;
3218     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3219       StartIdx = CI->getZExtValue();
3220     else
3221       return false;
3222     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3223                                     StartIdx + Offset);
3224   }
3225 
3226   // The GEP, whether a constant expression or an instruction, must reference
3227   // a global variable that is a constant and is initialized. The referenced
3228   // constant initializer is the array that we'll use for optimization.
3229   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3230   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3231     return false;
3232 
3233   const ConstantDataArray *Array;
3234   ArrayType *ArrayTy;
3235   if (GV->getInitializer()->isNullValue()) {
3236     Type *GVTy = GV->getValueType();
3237     if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3238       // A zeroinitializer for the array; there is no ConstantDataArray.
3239       Array = nullptr;
3240     } else {
3241       const DataLayout &DL = GV->getParent()->getDataLayout();
3242       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3243       uint64_t Length = SizeInBytes / (ElementSize / 8);
3244       if (Length <= Offset)
3245         return false;
3246 
3247       Slice.Array = nullptr;
3248       Slice.Offset = 0;
3249       Slice.Length = Length - Offset;
3250       return true;
3251     }
3252   } else {
3253     // This must be a ConstantDataArray.
3254     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3255     if (!Array)
3256       return false;
3257     ArrayTy = Array->getType();
3258   }
3259   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3260     return false;
3261 
3262   uint64_t NumElts = ArrayTy->getArrayNumElements();
3263   if (Offset > NumElts)
3264     return false;
3265 
3266   Slice.Array = Array;
3267   Slice.Offset = Offset;
3268   Slice.Length = NumElts - Offset;
3269   return true;
3270 }
3271 
3272 /// This function computes the length of a null-terminated C string pointed to
3273 /// by V. If successful, it returns true and returns the string in Str.
3274 /// If unsuccessful, it returns false.
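/// For example (illustrative), given a global
///   @s = private constant [6 x i8] c"hello\00"
/// a query at Offset 0 with TrimAtNul == true sets Str to "hello".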
3275 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 3276 uint64_t Offset, bool TrimAtNul) { 3277 ConstantDataArraySlice Slice; 3278 if (!getConstantDataArrayInfo(V, Slice, 8, Offset)) 3279 return false; 3280 3281 if (Slice.Array == nullptr) { 3282 if (TrimAtNul) { 3283 Str = StringRef(); 3284 return true; 3285 } 3286 if (Slice.Length == 1) { 3287 Str = StringRef("", 1); 3288 return true; 3289 } 3290 // We cannot instantiate a StringRef as we do not have an appropriate string 3291 // of 0s at hand. 3292 return false; 3293 } 3294 3295 // Start out with the entire array in the StringRef. 3296 Str = Slice.Array->getAsString(); 3297 // Skip over 'offset' bytes. 3298 Str = Str.substr(Slice.Offset); 3299 3300 if (TrimAtNul) { 3301 // Trim off the \0 and anything after it. If the array is not nul 3302 // terminated, we just return the whole end of string. The client may know 3303 // some other way that the string is length-bound. 3304 Str = Str.substr(0, Str.find('\0')); 3305 } 3306 return true; 3307 } 3308 3309 // These next two are very similar to the above, but also look through PHI 3310 // nodes. 3311 // TODO: See if we can integrate these two together. 3312 3313 /// If we can compute the length of the string pointed to by 3314 /// the specified pointer, return 'len+1'. If we can't, return 0. 3315 static uint64_t GetStringLengthH(const Value *V, 3316 SmallPtrSetImpl<const PHINode*> &PHIs, 3317 unsigned CharSize) { 3318 // Look through noop bitcast instructions. 3319 V = V->stripPointerCasts(); 3320 3321 // If this is a PHI node, there are two cases: either we have already seen it 3322 // or we haven't. 3323 if (const PHINode *PN = dyn_cast<PHINode>(V)) { 3324 if (!PHIs.insert(PN).second) 3325 return ~0ULL; // already in the set. 3326 3327 // If it was new, see if all the input strings are the same length. 3328 uint64_t LenSoFar = ~0ULL; 3329 for (Value *IncValue : PN->incoming_values()) { 3330 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize); 3331 if (Len == 0) return 0; // Unknown length -> unknown. 3332 3333 if (Len == ~0ULL) continue; 3334 3335 if (Len != LenSoFar && LenSoFar != ~0ULL) 3336 return 0; // Disagree -> unknown. 3337 LenSoFar = Len; 3338 } 3339 3340 // Success, all agree. 3341 return LenSoFar; 3342 } 3343 3344 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) 3345 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 3346 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize); 3347 if (Len1 == 0) return 0; 3348 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize); 3349 if (Len2 == 0) return 0; 3350 if (Len1 == ~0ULL) return Len2; 3351 if (Len2 == ~0ULL) return Len1; 3352 if (Len1 != Len2) return 0; 3353 return Len1; 3354 } 3355 3356 // Otherwise, see if we can read the string. 3357 ConstantDataArraySlice Slice; 3358 if (!getConstantDataArrayInfo(V, Slice, CharSize)) 3359 return 0; 3360 3361 if (Slice.Array == nullptr) 3362 return 1; 3363 3364 // Search for nul characters 3365 unsigned NullIndex = 0; 3366 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) { 3367 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0) 3368 break; 3369 } 3370 3371 return NullIndex + 1; 3372 } 3373 3374 /// If we can compute the length of the string pointed to by 3375 /// the specified pointer, return 'len+1'. If we can't, return 0. 
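/// For example (illustrative), a pointer to the 6-byte constant "hello\00"
/// yields 6: five characters plus the terminating nul.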
3376 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) { 3377 if (!V->getType()->isPointerTy()) return 0; 3378 3379 SmallPtrSet<const PHINode*, 32> PHIs; 3380 uint64_t Len = GetStringLengthH(V, PHIs, CharSize); 3381 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return 3382 // an empty string as a length. 3383 return Len == ~0ULL ? 1 : Len; 3384 } 3385 3386 /// \brief \p PN defines a loop-variant pointer to an object. Check if the 3387 /// previous iteration of the loop was referring to the same object as \p PN. 3388 static bool isSameUnderlyingObjectInLoop(const PHINode *PN, 3389 const LoopInfo *LI) { 3390 // Find the loop-defined value. 3391 Loop *L = LI->getLoopFor(PN->getParent()); 3392 if (PN->getNumIncomingValues() != 2) 3393 return true; 3394 3395 // Find the value from previous iteration. 3396 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); 3397 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3398 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); 3399 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3400 return true; 3401 3402 // If a new pointer is loaded in the loop, the pointer references a different 3403 // object in every iteration. E.g.: 3404 // for (i) 3405 // int *p = a[i]; 3406 // ... 3407 if (auto *Load = dyn_cast<LoadInst>(PrevValue)) 3408 if (!L->isLoopInvariant(Load->getPointerOperand())) 3409 return false; 3410 return true; 3411 } 3412 3413 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL, 3414 unsigned MaxLookup) { 3415 if (!V->getType()->isPointerTy()) 3416 return V; 3417 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { 3418 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 3419 V = GEP->getPointerOperand(); 3420 } else if (Operator::getOpcode(V) == Instruction::BitCast || 3421 Operator::getOpcode(V) == Instruction::AddrSpaceCast) { 3422 V = cast<Operator>(V)->getOperand(0); 3423 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 3424 if (GA->isInterposable()) 3425 return V; 3426 V = GA->getAliasee(); 3427 } else if (isa<AllocaInst>(V)) { 3428 // An alloca can't be further simplified. 3429 return V; 3430 } else { 3431 if (auto CS = CallSite(V)) 3432 if (Value *RV = CS.getReturnedArgOperand()) { 3433 V = RV; 3434 continue; 3435 } 3436 3437 // See if InstructionSimplify knows any relevant tricks. 3438 if (Instruction *I = dyn_cast<Instruction>(V)) 3439 // TODO: Acquire a DominatorTree and AssumptionCache and use them. 3440 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) { 3441 V = Simplified; 3442 continue; 3443 } 3444 3445 return V; 3446 } 3447 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 3448 } 3449 return V; 3450 } 3451 3452 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects, 3453 const DataLayout &DL, LoopInfo *LI, 3454 unsigned MaxLookup) { 3455 SmallPtrSet<Value *, 4> Visited; 3456 SmallVector<Value *, 4> Worklist; 3457 Worklist.push_back(V); 3458 do { 3459 Value *P = Worklist.pop_back_val(); 3460 P = GetUnderlyingObject(P, DL, MaxLookup); 3461 3462 if (!Visited.insert(P).second) 3463 continue; 3464 3465 if (SelectInst *SI = dyn_cast<SelectInst>(P)) { 3466 Worklist.push_back(SI->getTrueValue()); 3467 Worklist.push_back(SI->getFalseValue()); 3468 continue; 3469 } 3470 3471 if (PHINode *PN = dyn_cast<PHINode>(P)) { 3472 // If this PHI changes the underlying object in every iteration of the 3473 // loop, don't look through it. 
Consider:
3474     //   int **A;
3475     //   for (i) {
3476     //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3477     //     Curr = A[i];
3478     //     *Prev, *Curr;
3479     //
3480     // Prev is tracking Curr one iteration behind so they refer to different
3481     // underlying objects.
3482     if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3483         isSameUnderlyingObjectInLoop(PN, LI))
3484       for (Value *IncValue : PN->incoming_values())
3485         Worklist.push_back(IncValue);
3486     continue;
3487   }
3488 
3489   Objects.push_back(P);
3490   } while (!Worklist.empty());
3491 }
3492 
3493 /// This is the function that does the work of looking through basic
3494 /// ptrtoint+arithmetic+inttoptr sequences.
3495 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3496   do {
3497     if (const Operator *U = dyn_cast<Operator>(V)) {
3498       // If we find a ptrtoint, we can transfer control back to the
3499       // regular GetUnderlyingObjects machinery.
3500       if (U->getOpcode() == Instruction::PtrToInt)
3501         return U->getOperand(0);
3502       // If we find an add of a constant, a multiplied value, or a phi, it's
3503       // likely that the other operand will lead us to the base
3504       // object. We don't have to worry about the case where the
3505       // object address is somehow being computed by the multiply,
3506       // because our callers only care when the result is an
3507       // identifiable object.
3508       if (U->getOpcode() != Instruction::Add ||
3509           (!isa<ConstantInt>(U->getOperand(1)) &&
3510            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3511            !isa<PHINode>(U->getOperand(1))))
3512         return V;
3513       V = U->getOperand(0);
3514     } else {
3515       return V;
3516     }
3517     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3518   } while (true);
3519 }
3520 
3521 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3522 /// ptrtoint+arithmetic+inttoptr sequences.
3523 /// It returns false if an unidentified object is found in GetUnderlyingObjects.
3524 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3525                                           SmallVectorImpl<Value *> &Objects,
3526                                           const DataLayout &DL) {
3527   SmallPtrSet<const Value *, 16> Visited;
3528   SmallVector<const Value *, 4> Working(1, V);
3529   do {
3530     V = Working.pop_back_val();
3531 
3532     SmallVector<Value *, 4> Objs;
3533     GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3534 
3535     for (Value *V : Objs) {
3536       if (!Visited.insert(V).second)
3537         continue;
3538       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3539         const Value *O =
3540           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3541         if (O->getType()->isPointerTy()) {
3542           Working.push_back(O);
3543           continue;
3544         }
3545       }
3546       // If GetUnderlyingObjects fails to find an identifiable object,
3547       // getUnderlyingObjectsForCodeGen also fails for safety.
3548       if (!isIdentifiedObject(V)) {
3549         Objects.clear();
3550         return false;
3551       }
3552       Objects.push_back(const_cast<Value *>(V));
3553     }
3554   } while (!Working.empty());
3555   return true;
3556 }
3557 
3558 /// Return true if the only users of this pointer are lifetime markers.
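/// That is, every user must be a call to the llvm.lifetime.start or
/// llvm.lifetime.end intrinsic.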
3559 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3560   for (const User *U : V->users()) {
3561     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3562     if (!II) return false;
3563 
3564     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3565         II->getIntrinsicID() != Intrinsic::lifetime_end)
3566       return false;
3567   }
3568   return true;
3569 }
3570 
3571 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3572                                         const Instruction *CtxI,
3573                                         const DominatorTree *DT) {
3574   const Operator *Inst = dyn_cast<Operator>(V);
3575   if (!Inst)
3576     return false;
3577 
3578   for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3579     if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3580       if (C->canTrap())
3581         return false;
3582 
3583   switch (Inst->getOpcode()) {
3584   default:
3585     return true;
3586   case Instruction::UDiv:
3587   case Instruction::URem: {
3588     // x / y is undefined if y == 0.
3589     const APInt *V;
3590     if (match(Inst->getOperand(1), m_APInt(V)))
3591       return *V != 0;
3592     return false;
3593   }
3594   case Instruction::SDiv:
3595   case Instruction::SRem: {
3596     // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3597     const APInt *Numerator, *Denominator;
3598     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3599       return false;
3600     // We cannot hoist this division if the denominator is 0.
3601     if (*Denominator == 0)
3602       return false;
3603     // It's safe to hoist if the denominator is not 0 or -1.
3604     if (*Denominator != -1)
3605       return true;
3606     // At this point we know that the denominator is -1. It is safe to hoist as
3607     // long as we know that the numerator is not INT_MIN.
3608     if (match(Inst->getOperand(0), m_APInt(Numerator)))
3609       return !Numerator->isMinSignedValue();
3610     // The numerator *might* be MinSignedValue.
3611     return false;
3612   }
3613   case Instruction::Load: {
3614     const LoadInst *LI = cast<LoadInst>(Inst);
3615     if (!LI->isUnordered() ||
3616         // Speculative load may create a race that did not exist in the source.
3617         LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3618         // Speculative load may load data from dirty regions.
3619         LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3620         LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3621       return false;
3622     const DataLayout &DL = LI->getModule()->getDataLayout();
3623     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3624                                               LI->getAlignment(), DL, CtxI, DT);
3625   }
3626   case Instruction::Call: {
3627     auto *CI = cast<const CallInst>(Inst);
3628     const Function *Callee = CI->getCalledFunction();
3629 
3630     // The called function could have undefined behavior or side-effects, even
3631     // if marked readnone nounwind.
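    // Only the explicit 'speculatable' attribute is trusted here; e.g. a
    // readnone nounwind callee may still contain a division that traps on
    // some inputs.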
3632     return Callee && Callee->isSpeculatable();
3633   }
3634   case Instruction::VAArg:
3635   case Instruction::Alloca:
3636   case Instruction::Invoke:
3637   case Instruction::PHI:
3638   case Instruction::Store:
3639   case Instruction::Ret:
3640   case Instruction::Br:
3641   case Instruction::IndirectBr:
3642   case Instruction::Switch:
3643   case Instruction::Unreachable:
3644   case Instruction::Fence:
3645   case Instruction::AtomicRMW:
3646   case Instruction::AtomicCmpXchg:
3647   case Instruction::LandingPad:
3648   case Instruction::Resume:
3649   case Instruction::CatchSwitch:
3650   case Instruction::CatchPad:
3651   case Instruction::CatchRet:
3652   case Instruction::CleanupPad:
3653   case Instruction::CleanupRet:
3654     return false; // Misc instructions which have effects
3655   }
3656 }
3657 
3658 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3659   return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3660 }
3661 
3662 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3663                                                    const Value *RHS,
3664                                                    const DataLayout &DL,
3665                                                    AssumptionCache *AC,
3666                                                    const Instruction *CxtI,
3667                                                    const DominatorTree *DT) {
3668   // Multiplying a value with n significant bits by a value with m
3669   // significant bits yields a result with at most n + m significant bits.
3670   // If the total number of significant bits does not exceed the result bit
3671   // width (minus 1), there is no overflow. This means if we have enough
3672   // leading zero bits in the operands we can guarantee that the result
3673   // does not overflow. Ref: "Hacker's Delight" by Henry Warren
3674   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3675   KnownBits LHSKnown(BitWidth);
3676   KnownBits RHSKnown(BitWidth);
3677   computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3678   computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3679   // Note that underestimating the number of zero bits gives a more
3680   // conservative answer.
3681   unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3682                       RHSKnown.countMinLeadingZeros();
3683   // First handle the easy case: if we have enough zero bits there's
3684   // definitely no overflow.
3685   if (ZeroBits >= BitWidth)
3686     return OverflowResult::NeverOverflows;
3687 
3688   // Get the largest possible values for each operand.
3689   APInt LHSMax = ~LHSKnown.Zero;
3690   APInt RHSMax = ~RHSKnown.Zero;
3691 
3692   // We know the multiply operation doesn't overflow if the maximum values for
3693   // each operand will not overflow after we multiply them together.
3694   bool MaxOverflow;
3695   (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3696   if (!MaxOverflow)
3697     return OverflowResult::NeverOverflows;
3698 
3699   // We know it always overflows if multiplying the smallest possible values
3700   // for the operands also results in overflow.
3701   bool MinOverflow;
3702   (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3703   if (MinOverflow)
3704     return OverflowResult::AlwaysOverflows;
3705 
3706   return OverflowResult::MayOverflow;
3707 }
3708 
3709 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3710                                                    const Value *RHS,
3711                                                    const DataLayout &DL,
3712                                                    AssumptionCache *AC,
3713                                                    const Instruction *CxtI,
3714                                                    const DominatorTree *DT) {
3715   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3716   if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3717     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3718 
3719     if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3720       // The sign bit is set in both cases: this MUST overflow.
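      // For example, two i8 values with the sign bit set are each >= 128 when
      // viewed as unsigned, so their sum is >= 256 and cannot fit in 8 bits.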
3722       return OverflowResult::AlwaysOverflows;
3723     }
3724 
3725     if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3726       // The sign bit is clear in both cases: this CANNOT overflow.
3728       return OverflowResult::NeverOverflows;
3729     }
3730   }
3731 
3732   return OverflowResult::MayOverflow;
3733 }
3734 
3735 /// \brief Return true if we can prove that adding the two values of the
3736 /// knownbits will not overflow.
3737 /// Otherwise return false.
3738 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3739                                     const KnownBits &RHSKnown) {
3740   // Addition of two 2's complement numbers having opposite signs will never
3741   // overflow.
3742   if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3743       (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3744     return true;
3745 
3746   // If either of the values is known to be non-negative, adding them can only
3747   // overflow if the second is also non-negative, so we can assume that.
3748   // Two non-negative numbers will only overflow if there is a carry to the
3749   // sign bit, so we can check if even when the values are as big as possible
3750   // there is no overflow to the sign bit.
3751   if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3752     APInt MaxLHS = ~LHSKnown.Zero;
3753     MaxLHS.clearSignBit();
3754     APInt MaxRHS = ~RHSKnown.Zero;
3755     MaxRHS.clearSignBit();
3756     APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3757     return Result.isSignBitClear();
3758   }
3759 
3760   // If either of the values is known to be negative, adding them can only
3761   // overflow if the second is also negative, so we can assume that.
3762   // Two negative numbers will only overflow if there is no carry to the sign
3763   // bit, so we can check if even when the values are as small as possible
3764   // there is overflow to the sign bit.
3765   if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3766     APInt MinLHS = LHSKnown.One;
3767     MinLHS.clearSignBit();
3768     APInt MinRHS = RHSKnown.One;
3769     MinRHS.clearSignBit();
3770     APInt Result = std::move(MinLHS) + std::move(MinRHS);
3771     return Result.isSignBitSet();
3772   }
3773 
3774   // If we reached here it means that we know nothing about the sign bits.
3775   // In this case we can't know if there will be an overflow, since by
3776   // changing the sign bits any two values can be made to overflow.
3777   return false;
3778 }
3779 
3780 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3781                                                   const Value *RHS,
3782                                                   const AddOperator *Add,
3783                                                   const DataLayout &DL,
3784                                                   AssumptionCache *AC,
3785                                                   const Instruction *CxtI,
3786                                                   const DominatorTree *DT) {
3787   if (Add && Add->hasNoSignedWrap()) {
3788     return OverflowResult::NeverOverflows;
3789   }
3790 
3791   // If LHS and RHS each have at least two sign bits, the addition will look
3792   // like
3793   //
3794   //   XX..... +
3795   //   YY.....
3796   //
3797   // If the carry into the most significant position is 0, X and Y can't both
3798   // be 1 and therefore the carry out of the addition is also 0.
3799   //
3800   // If the carry into the most significant position is 1, X and Y can't both
3801   // be 0 and therefore the carry out of the addition is also 1.
3802   //
3803   // Since the carry into the most significant position is always equal to
3804   // the carry out of the addition, there is no signed overflow.
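  // For example, i8 values with at least two sign bits lie in [-64, 63], so
  // the sum of two such values lies in [-128, 126] and cannot wrap i8.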
3805   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3806       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3807     return OverflowResult::NeverOverflows;
3808 
3809   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3810   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3811 
3812   if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3813     return OverflowResult::NeverOverflows;
3814 
3815   // The remaining code needs Add to be available. Return early if it is not.
3816   if (!Add)
3817     return OverflowResult::MayOverflow;
3818 
3819   // If the sign of Add is the same as at least one of the operands, this add
3820   // CANNOT overflow. This is particularly useful when the sum is
3821   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3822   // operands.
3823   bool LHSOrRHSKnownNonNegative =
3824       (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3825   bool LHSOrRHSKnownNegative =
3826       (LHSKnown.isNegative() || RHSKnown.isNegative());
3827   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3828     KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3829     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3830         (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3831       return OverflowResult::NeverOverflows;
3832     }
3833   }
3834 
3835   return OverflowResult::MayOverflow;
3836 }
3837 
3838 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3839                                      const DominatorTree &DT) {
3840 #ifndef NDEBUG
3841   auto IID = II->getIntrinsicID();
3842   assert((IID == Intrinsic::sadd_with_overflow ||
3843           IID == Intrinsic::uadd_with_overflow ||
3844           IID == Intrinsic::ssub_with_overflow ||
3845           IID == Intrinsic::usub_with_overflow ||
3846           IID == Intrinsic::smul_with_overflow ||
3847           IID == Intrinsic::umul_with_overflow) &&
3848          "Not an overflow intrinsic!");
3849 #endif
3850 
3851   SmallVector<const BranchInst *, 2> GuardingBranches;
3852   SmallVector<const ExtractValueInst *, 2> Results;
3853 
3854   for (const User *U : II->users()) {
3855     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3856       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3857 
3858       if (EVI->getIndices()[0] == 0)
3859         Results.push_back(EVI);
3860       else {
3861         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3862 
3863         for (const auto *U : EVI->users())
3864           if (const auto *B = dyn_cast<BranchInst>(U)) {
3865             assert(B->isConditional() && "How else is it using an i1?");
3866             GuardingBranches.push_back(B);
3867           }
3868       }
3869     } else {
3870       // We are using the aggregate directly in a way we don't want to analyze
3871       // here (storing it to a global, say).
3872       return false;
3873     }
3874   }
3875 
3876   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3877     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3878     if (!NoWrapEdge.isSingleEdge())
3879       return false;
3880 
3881     // Check if all users of the add are provably no-wrap.
3882     for (const auto *Result : Results) {
3883       // If the extractvalue itself is not executed on overflow, then we don't
3884       // need to check each use separately, since domination is transitive.
3885 if (DT.dominates(NoWrapEdge, Result->getParent())) 3886 continue; 3887 3888 for (auto &RU : Result->uses()) 3889 if (!DT.dominates(NoWrapEdge, RU)) 3890 return false; 3891 } 3892 3893 return true; 3894 }; 3895 3896 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch); 3897 } 3898 3899 3900 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add, 3901 const DataLayout &DL, 3902 AssumptionCache *AC, 3903 const Instruction *CxtI, 3904 const DominatorTree *DT) { 3905 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1), 3906 Add, DL, AC, CxtI, DT); 3907 } 3908 3909 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS, 3910 const Value *RHS, 3911 const DataLayout &DL, 3912 AssumptionCache *AC, 3913 const Instruction *CxtI, 3914 const DominatorTree *DT) { 3915 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT); 3916 } 3917 3918 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) { 3919 // A memory operation returns normally if it isn't volatile. A volatile 3920 // operation is allowed to trap. 3921 // 3922 // An atomic operation isn't guaranteed to return in a reasonable amount of 3923 // time because it's possible for another thread to interfere with it for an 3924 // arbitrary length of time, but programs aren't allowed to rely on that. 3925 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) 3926 return !LI->isVolatile(); 3927 if (const StoreInst *SI = dyn_cast<StoreInst>(I)) 3928 return !SI->isVolatile(); 3929 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I)) 3930 return !CXI->isVolatile(); 3931 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) 3932 return !RMWI->isVolatile(); 3933 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I)) 3934 return !MII->isVolatile(); 3935 3936 // If there is no successor, then execution can't transfer to it. 3937 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I)) 3938 return !CRI->unwindsToCaller(); 3939 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) 3940 return !CatchSwitch->unwindsToCaller(); 3941 if (isa<ResumeInst>(I)) 3942 return false; 3943 if (isa<ReturnInst>(I)) 3944 return false; 3945 if (isa<UnreachableInst>(I)) 3946 return false; 3947 3948 // Calls can throw, or contain an infinite loop, or kill the process. 3949 if (auto CS = ImmutableCallSite(I)) { 3950 // Call sites that throw have implicit non-local control flow. 3951 if (!CS.doesNotThrow()) 3952 return false; 3953 3954 // Non-throwing call sites can loop infinitely, call exit/pthread_exit 3955 // etc. and thus not return. However, LLVM already assumes that 3956 // 3957 // - Thread exiting actions are modeled as writes to memory invisible to 3958 // the program. 3959 // 3960 // - Loops that don't have side effects (side effects are volatile/atomic 3961 // stores and IO) always terminate (see http://llvm.org/PR965). 3962 // Furthermore IO itself is also modeled as writes to memory invisible to 3963 // the program. 3964 // 3965 // We rely on those assumptions here, and use the memory effects of the call 3966 // target as a proxy for checking that it always returns. 3967 3968 // FIXME: This isn't aggressive enough; a call which only writes to a global 3969 // is guaranteed to return. 3970 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() || 3971 match(I, m_Intrinsic<Intrinsic::assume>()) || 3972 match(I, m_Intrinsic<Intrinsic::sideeffect>()); 3973 } 3974 3975 // Other instructions return normally. 
3976   return true;
3977 }
3978 
3979 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
3980   // TODO: This is slightly conservative for invoke instructions, since
3981   // exiting via an exception *is* normal control for them.
3982   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
3983     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
3984       return false;
3985   return true;
3986 }
3987 
3988 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3989                                                   const Loop *L) {
3990   // The loop header is guaranteed to be executed for every iteration.
3991   //
3992   // FIXME: Relax this constraint to cover all basic blocks that are
3993   // guaranteed to be executed at every iteration.
3994   if (I->getParent() != L->getHeader()) return false;
3995 
3996   for (const Instruction &LI : *L->getHeader()) {
3997     if (&LI == I) return true;
3998     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3999   }
4000   llvm_unreachable("Instruction not contained in its own parent basic block.");
4001 }
4002 
4003 bool llvm::propagatesFullPoison(const Instruction *I) {
4004   switch (I->getOpcode()) {
4005   case Instruction::Add:
4006   case Instruction::Sub:
4007   case Instruction::Xor:
4008   case Instruction::Trunc:
4009   case Instruction::BitCast:
4010   case Instruction::AddrSpaceCast:
4011   case Instruction::Mul:
4012   case Instruction::Shl:
4013   case Instruction::GetElementPtr:
4014     // These operations all propagate poison unconditionally. Note that poison
4015     // is not any particular value, so xor or subtraction of poison with
4016     // itself still yields poison, not zero.
4017     return true;
4018 
4019   case Instruction::AShr:
4020   case Instruction::SExt:
4021     // For these operations, one bit of the input is replicated across
4022     // multiple output bits. A replicated poison bit is still poison.
4023     return true;
4024 
4025   case Instruction::ICmp:
4026     // Comparing poison with any value yields poison. This is why, for
4027     // instance, x s< (x +nsw 1) can be folded to true.
4028     return true;
4029 
4030   default:
4031     return false;
4032   }
4033 }
4034 
4035 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4036   switch (I->getOpcode()) {
4037   case Instruction::Store:
4038     return cast<StoreInst>(I)->getPointerOperand();
4039 
4040   case Instruction::Load:
4041     return cast<LoadInst>(I)->getPointerOperand();
4042 
4043   case Instruction::AtomicCmpXchg:
4044     return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4045 
4046   case Instruction::AtomicRMW:
4047     return cast<AtomicRMWInst>(I)->getPointerOperand();
4048 
4049   case Instruction::UDiv:
4050   case Instruction::SDiv:
4051   case Instruction::URem:
4052   case Instruction::SRem:
4053     return I->getOperand(1);
4054 
4055   default:
4056     return nullptr;
4057   }
4058 }
4059 
4060 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4061   // We currently only look for uses of poison values within the same basic
4062   // block, as that makes it easier to guarantee that the uses will be
4063   // executed given that PoisonI is executed.
4064   //
4065   // FIXME: Expand this to consider uses beyond the same basic block. To do
4066   // this, look out for the distinction between post-dominance and strong
4067   // post-dominance.
4068   const BasicBlock *BB = PoisonI->getParent();
4069 
4070   // Set of instructions that we have proved will yield poison if PoisonI
4071   // does.
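  // For example, if PoisonI is "%p = add nsw i32 %x, 1" and a later load in
  // this block uses an address derived from %p, then %p being poison would
  // make the program undefined.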
4072   SmallSet<const Value *, 16> YieldsPoison;
4073   SmallSet<const BasicBlock *, 4> Visited;
4074   YieldsPoison.insert(PoisonI);
4075   Visited.insert(PoisonI->getParent());
4076 
4077   BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4078 
4079   unsigned Iter = 0;
4080   while (Iter++ < MaxDepth) {
4081     for (auto &I : make_range(Begin, End)) {
4082       if (&I != PoisonI) {
4083         const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4084         if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4085           return true;
4086         if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4087           return false;
4088       }
4089 
4090       // Mark poison that propagates from I through uses of I.
4091       if (YieldsPoison.count(&I)) {
4092         for (const User *User : I.users()) {
4093           const Instruction *UserI = cast<Instruction>(User);
4094           if (propagatesFullPoison(UserI))
4095             YieldsPoison.insert(User);
4096         }
4097       }
4098     }
4099 
4100     if (auto *NextBB = BB->getSingleSuccessor()) {
4101       if (Visited.insert(NextBB).second) {
4102         BB = NextBB;
4103         Begin = BB->getFirstNonPHI()->getIterator();
4104         End = BB->end();
4105         continue;
4106       }
4107     }
4108 
4109     break;
4110   }
4111   return false;
4112 }
4113 
4114 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4115   if (FMF.noNaNs())
4116     return true;
4117 
4118   if (auto *C = dyn_cast<ConstantFP>(V))
4119     return !C->isNaN();
4120   return false;
4121 }
4122 
4123 static bool isKnownNonZero(const Value *V) {
4124   if (auto *C = dyn_cast<ConstantFP>(V))
4125     return !C->isZero();
4126   return false;
4127 }
4128 
4129 /// Match clamp pattern for float types without caring about NaNs or signed
4130 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
4131 /// function recognizes whether it can be substituted by a "canonical" min/max
4132 /// pattern.
4133 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4134                                                Value *CmpLHS, Value *CmpRHS,
4135                                                Value *TrueVal, Value *FalseVal,
4136                                                Value *&LHS, Value *&RHS) {
4137   // Try to match
4138   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4139   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4140   // and return description of the outer Max/Min.
4141 
4142   // First, check if select has inverse order:
4143   if (CmpRHS == FalseVal) {
4144     std::swap(TrueVal, FalseVal);
4145     Pred = CmpInst::getInversePredicate(Pred);
4146   }
4147 
4148   // Assume success now. If there's no match, callers should not use these anyway.
4149 LHS = TrueVal; 4150 RHS = FalseVal; 4151 4152 const APFloat *FC1; 4153 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite()) 4154 return {SPF_UNKNOWN, SPNB_NA, false}; 4155 4156 const APFloat *FC2; 4157 switch (Pred) { 4158 case CmpInst::FCMP_OLT: 4159 case CmpInst::FCMP_OLE: 4160 case CmpInst::FCMP_ULT: 4161 case CmpInst::FCMP_ULE: 4162 if (match(FalseVal, 4163 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)), 4164 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) && 4165 FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan) 4166 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false}; 4167 break; 4168 case CmpInst::FCMP_OGT: 4169 case CmpInst::FCMP_OGE: 4170 case CmpInst::FCMP_UGT: 4171 case CmpInst::FCMP_UGE: 4172 if (match(FalseVal, 4173 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)), 4174 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) && 4175 FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan) 4176 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false}; 4177 break; 4178 default: 4179 break; 4180 } 4181 4182 return {SPF_UNKNOWN, SPNB_NA, false}; 4183 } 4184 4185 /// Recognize variations of: 4186 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v))) 4187 static SelectPatternResult matchClamp(CmpInst::Predicate Pred, 4188 Value *CmpLHS, Value *CmpRHS, 4189 Value *TrueVal, Value *FalseVal) { 4190 // Swap the select operands and predicate to match the patterns below. 4191 if (CmpRHS != TrueVal) { 4192 Pred = ICmpInst::getSwappedPredicate(Pred); 4193 std::swap(TrueVal, FalseVal); 4194 } 4195 const APInt *C1; 4196 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 4197 const APInt *C2; 4198 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 4199 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 4200 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 4201 return {SPF_SMAX, SPNB_NA, false}; 4202 4203 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 4204 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 4205 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 4206 return {SPF_SMIN, SPNB_NA, false}; 4207 4208 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 4209 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 4210 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 4211 return {SPF_UMAX, SPNB_NA, false}; 4212 4213 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 4214 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 4215 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 4216 return {SPF_UMIN, SPNB_NA, false}; 4217 } 4218 return {SPF_UNKNOWN, SPNB_NA, false}; 4219 } 4220 4221 /// Recognize variations of: 4222 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) 4223 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, 4224 Value *CmpLHS, Value *CmpRHS, 4225 Value *TVal, Value *FVal, 4226 unsigned Depth) { 4227 // TODO: Allow FP min/max with nnan/nsz. 4228 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); 4229 4230 Value *A, *B; 4231 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); 4232 if (!SelectPatternResult::isMinOrMax(L.Flavor)) 4233 return {SPF_UNKNOWN, SPNB_NA, false}; 4234 4235 Value *C, *D; 4236 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); 4237 if (L.Flavor != R.Flavor) 4238 return {SPF_UNKNOWN, SPNB_NA, false}; 4239 4240 // We have something like: x Pred y ? min(a, b) : min(c, d). 4241 // Try to match the compare to the min/max operations of the select operands. 
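  // For example (one concrete instance):
  //   (a <s c) ? smin(a, b) : smin(c, b) --> smin(smin(a, b), smin(c, b))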
4242 // First, make sure we have the right compare predicate. 4243 switch (L.Flavor) { 4244 case SPF_SMIN: 4245 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { 4246 Pred = ICmpInst::getSwappedPredicate(Pred); 4247 std::swap(CmpLHS, CmpRHS); 4248 } 4249 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 4250 break; 4251 return {SPF_UNKNOWN, SPNB_NA, false}; 4252 case SPF_SMAX: 4253 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { 4254 Pred = ICmpInst::getSwappedPredicate(Pred); 4255 std::swap(CmpLHS, CmpRHS); 4256 } 4257 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 4258 break; 4259 return {SPF_UNKNOWN, SPNB_NA, false}; 4260 case SPF_UMIN: 4261 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { 4262 Pred = ICmpInst::getSwappedPredicate(Pred); 4263 std::swap(CmpLHS, CmpRHS); 4264 } 4265 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 4266 break; 4267 return {SPF_UNKNOWN, SPNB_NA, false}; 4268 case SPF_UMAX: 4269 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { 4270 Pred = ICmpInst::getSwappedPredicate(Pred); 4271 std::swap(CmpLHS, CmpRHS); 4272 } 4273 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) 4274 break; 4275 return {SPF_UNKNOWN, SPNB_NA, false}; 4276 default: 4277 return {SPF_UNKNOWN, SPNB_NA, false}; 4278 } 4279 4280 // If there is a common operand in the already matched min/max and the other 4281 // min/max operands match the compare operands (either directly or inverted), 4282 // then this is min/max of the same flavor. 4283 4284 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4285 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4286 if (D == B) { 4287 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4288 match(A, m_Not(m_Specific(CmpRHS))))) 4289 return {L.Flavor, SPNB_NA, false}; 4290 } 4291 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4292 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4293 if (C == B) { 4294 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 4295 match(A, m_Not(m_Specific(CmpRHS))))) 4296 return {L.Flavor, SPNB_NA, false}; 4297 } 4298 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4299 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4300 if (D == A) { 4301 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4302 match(B, m_Not(m_Specific(CmpRHS))))) 4303 return {L.Flavor, SPNB_NA, false}; 4304 } 4305 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 4306 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 4307 if (C == A) { 4308 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 4309 match(B, m_Not(m_Specific(CmpRHS))))) 4310 return {L.Flavor, SPNB_NA, false}; 4311 } 4312 4313 return {SPF_UNKNOWN, SPNB_NA, false}; 4314 } 4315 4316 /// Match non-obvious integer minimum and maximum sequences. 4317 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, 4318 Value *CmpLHS, Value *CmpRHS, 4319 Value *TrueVal, Value *FalseVal, 4320 Value *&LHS, Value *&RHS, 4321 unsigned Depth) { 4322 // Assume success. If there's no match, callers should not use these anyway. 
4323 LHS = TrueVal; 4324 RHS = FalseVal; 4325 4326 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal); 4327 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 4328 return SPR; 4329 4330 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth); 4331 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 4332 return SPR; 4333 4334 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) 4335 return {SPF_UNKNOWN, SPNB_NA, false}; 4336 4337 // Z = X -nsw Y 4338 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0) 4339 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0) 4340 if (match(TrueVal, m_Zero()) && 4341 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 4342 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 4343 4344 // Z = X -nsw Y 4345 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0) 4346 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0) 4347 if (match(FalseVal, m_Zero()) && 4348 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 4349 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 4350 4351 const APInt *C1; 4352 if (!match(CmpRHS, m_APInt(C1))) 4353 return {SPF_UNKNOWN, SPNB_NA, false}; 4354 4355 // An unsigned min/max can be written with a signed compare. 4356 const APInt *C2; 4357 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || 4358 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { 4359 // Is the sign bit set? 4360 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX 4361 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN 4362 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() && 4363 C2->isMaxSignedValue()) 4364 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4365 4366 // Is the sign bit clear? 4367 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX 4368 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN 4369 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() && 4370 C2->isMinSignedValue()) 4371 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4372 } 4373 4374 // Look through 'not' ops to find disguised signed min/max. 4375 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C) 4376 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C) 4377 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) && 4378 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2) 4379 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 4380 4381 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X) 4382 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X) 4383 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) && 4384 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2) 4385 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 4386 4387 return {SPF_UNKNOWN, SPNB_NA, false}; 4388 } 4389 4390 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, 4391 FastMathFlags FMF, 4392 Value *CmpLHS, Value *CmpRHS, 4393 Value *TrueVal, Value *FalseVal, 4394 Value *&LHS, Value *&RHS, 4395 unsigned Depth) { 4396 LHS = CmpLHS; 4397 RHS = CmpRHS; 4398 4399 // Signed zero may return inconsistent results between implementations. 4400 // (0.0 <= -0.0) ? 
0.0 : -0.0 // Returns 0.0 4401 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) 4402 // Therefore, we behave conservatively and only proceed if at least one of the 4403 // operands is known to not be zero or if we don't care about signed zero. 4404 switch (Pred) { 4405 default: break; 4406 // FIXME: Include OGT/OLT/UGT/ULT. 4407 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE: 4408 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE: 4409 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && 4410 !isKnownNonZero(CmpRHS)) 4411 return {SPF_UNKNOWN, SPNB_NA, false}; 4412 } 4413 4414 SelectPatternNaNBehavior NaNBehavior = SPNB_NA; 4415 bool Ordered = false; 4416 4417 // When given one NaN and one non-NaN input: 4418 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. 4419 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the 4420 // ordered comparison fails), which could be NaN or non-NaN. 4421 // so here we discover exactly what NaN behavior is required/accepted. 4422 if (CmpInst::isFPPredicate(Pred)) { 4423 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); 4424 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); 4425 4426 if (LHSSafe && RHSSafe) { 4427 // Both operands are known non-NaN. 4428 NaNBehavior = SPNB_RETURNS_ANY; 4429 } else if (CmpInst::isOrdered(Pred)) { 4430 // An ordered comparison will return false when given a NaN, so it 4431 // returns the RHS. 4432 Ordered = true; 4433 if (LHSSafe) 4434 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 4435 NaNBehavior = SPNB_RETURNS_NAN; 4436 else if (RHSSafe) 4437 NaNBehavior = SPNB_RETURNS_OTHER; 4438 else 4439 // Completely unsafe. 4440 return {SPF_UNKNOWN, SPNB_NA, false}; 4441 } else { 4442 Ordered = false; 4443 // An unordered comparison will return true when given a NaN, so it 4444 // returns the LHS. 4445 if (LHSSafe) 4446 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 4447 NaNBehavior = SPNB_RETURNS_OTHER; 4448 else if (RHSSafe) 4449 NaNBehavior = SPNB_RETURNS_NAN; 4450 else 4451 // Completely unsafe. 4452 return {SPF_UNKNOWN, SPNB_NA, false}; 4453 } 4454 } 4455 4456 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 4457 std::swap(CmpLHS, CmpRHS); 4458 Pred = CmpInst::getSwappedPredicate(Pred); 4459 if (NaNBehavior == SPNB_RETURNS_NAN) 4460 NaNBehavior = SPNB_RETURNS_OTHER; 4461 else if (NaNBehavior == SPNB_RETURNS_OTHER) 4462 NaNBehavior = SPNB_RETURNS_NAN; 4463 Ordered = !Ordered; 4464 } 4465 4466 // ([if]cmp X, Y) ? X : Y 4467 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 4468 switch (Pred) { 4469 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 
4470 case ICmpInst::ICMP_UGT: 4471 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; 4472 case ICmpInst::ICMP_SGT: 4473 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; 4474 case ICmpInst::ICMP_ULT: 4475 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; 4476 case ICmpInst::ICMP_SLT: 4477 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; 4478 case FCmpInst::FCMP_UGT: 4479 case FCmpInst::FCMP_UGE: 4480 case FCmpInst::FCMP_OGT: 4481 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; 4482 case FCmpInst::FCMP_ULT: 4483 case FCmpInst::FCMP_ULE: 4484 case FCmpInst::FCMP_OLT: 4485 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; 4486 } 4487 } 4488 4489 const APInt *C1; 4490 if (match(CmpRHS, m_APInt(C1))) { 4491 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) || 4492 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) { 4493 4494 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X 4495 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X 4496 if (Pred == ICmpInst::ICMP_SGT && 4497 (C1->isNullValue() || C1->isAllOnesValue())) { 4498 return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false}; 4499 } 4500 4501 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X 4502 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X 4503 if (Pred == ICmpInst::ICMP_SLT && 4504 (C1->isNullValue() || C1->isOneValue())) { 4505 return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false}; 4506 } 4507 } 4508 } 4509 4510 if (CmpInst::isIntPredicate(Pred)) 4511 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth); 4512 4513 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar 4514 // may return either -0.0 or 0.0, so fcmp/select pair has stricter 4515 // semantics than minNum. Be conservative in such case. 4516 if (NaNBehavior != SPNB_RETURNS_ANY || 4517 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && 4518 !isKnownNonZero(CmpRHS))) 4519 return {SPF_UNKNOWN, SPNB_NA, false}; 4520 4521 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS); 4522 } 4523 4524 /// Helps to match a select pattern in case of a type mismatch. 4525 /// 4526 /// The function processes the case when type of true and false values of a 4527 /// select instruction differs from type of the cmp instruction operands because 4528 /// of a cast instruction. The function checks if it is legal to move the cast 4529 /// operation after "select". If yes, it returns the new second value of 4530 /// "select" (with the assumption that cast is moved): 4531 /// 1. As operand of cast instruction when both values of "select" are same cast 4532 /// instructions. 4533 /// 2. As restored constant (by applying reverse cast operation) when the first 4534 /// value of the "select" is a cast operation and the second value is a 4535 /// constant. 4536 /// NOTE: We return only the new second value because the first value could be 4537 /// accessed as operand of cast instruction. 4538 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, 4539 Instruction::CastOps *CastOp) { 4540 auto *Cast1 = dyn_cast<CastInst>(V1); 4541 if (!Cast1) 4542 return nullptr; 4543 4544 *CastOp = Cast1->getOpcode(); 4545 Type *SrcTy = Cast1->getSrcTy(); 4546 if (auto *Cast2 = dyn_cast<CastInst>(V2)) { 4547 // If V1 and V2 are both the same cast from the same type, look through V1. 
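    // For example (illustrative), in
    //   %t = zext i8 %a to i32
    //   %f = zext i8 %b to i32
    //   %s = select i1 %c, i32 %t, i32 %f
    // the pattern can be matched on the narrow values %a and %b directly.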
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move the trunc after the select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C can be extended in any way because we don't care about
      // the upper bits after truncation. It can't be an abs pattern, because
      // that would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only a min/max pattern can be matched. Such a match requires the
      // widened C to equal CmpConst. That is why we set the widened C to
      // CmpConst; the condition trunc(CmpConst) == C is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
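  // For example, a float min/max whose result is immediately converted:
  //
  //   %c = fcmp olt double %a, %b
  //   %ai = fptosi double %a to i32
  //   %bi = fptosi double %b to i32
  //   %s = select i1 %c, i32 %ai, i32 %bi
  //
  // Both -0.0 and 0.0 convert to the integer 0, so the sign of zero becomes
  // irrelevant once the cast to integer is applied.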
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}

CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}

CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
}

/// Return true if "icmp Pred LHS RHS" is always true.
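/// For example, "A u<= A +_{nuw} C" holds for any C, because a nuw add cannot
/// wrap around and so cannot decrease the value.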
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is
/// true when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
/// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    const Value *ALHS,
                                                    const Value *ARHS,
                                                    CmpInst::Predicate BPred,
                                                    const Value *BLHS,
                                                    const Value *BRHS,
                                                    bool IsSwappedOps) {
  // Canonicalize the operands so they're matching.
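  // E.g. if the first compare is (icmp sgt %x, %y) and the second is
  // (icmp slt %y, %x), swap the second compare's operands and predicate so
  // that both are phrased as "%x pred %y" before consulting the implication
  // tables.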
  if (IsSwappedOps) {
    std::swap(BLHS, BRHS);
    BPred = ICmpInst::getSwappedPredicate(BPred);
  }
  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
/// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false. Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const Value *BLHS, const ConstantInt *C2) {
  assert(ALHS == BLHS && "LHS operands must match.");
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);
  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
  ICmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  Value *BLHS = RHS->getOperand(0);
  Value *BRHS = RHS->getOperand(1);
  ICmpInst::Predicate BPred = RHS->getPredicate();

  // Can we infer anything when the two compares have matching operands?
  bool IsSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands
  // are constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
            cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
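/// For example, if LHS is "(%a && %b)" and it is known to be true, then both
/// %a and %b must be true, so either operand may imply RHS on its own.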
static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication =
            isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication =
            isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (LHSCmp && RHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to
  // be an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO && RHSCmp) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
  }
  return None;
}
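// For example, "icmp ult i32 %i, 8" being true implies that
// "icmp ult i32 %i, 16" is true (the range [0, 8) is a subset of [0, 16)),
// and that "icmp ugt i32 %i, 15" is false (the two ranges don't intersect).
//
// A minimal usage sketch from a hypothetical client (the names DomCond and
// Cond are placeholders, not part of this file):
//
//   if (Optional<bool> Implied =
//           isImpliedCondition(DomCond, Cond, DL, /*LHSIsTrue=*/true)) {
//     // Cond is known to be *Implied whenever DomCond holds, so a client
//     // could replace Cond with a constant in blocks dominated by DomCond.
//   }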