//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

/// Enable an experimental feature to leverage information about dominating
/// conditions to compute known bits. The individual options below control how
/// hard we search. The defaults are chosen to be fairly aggressive. If you
/// run into compile time problems when testing, scale them back and report
/// your findings.
static cl::opt<bool> EnableDomConditions("value-tracking-dom-conditions",
                                         cl::Hidden, cl::init(false));

// This is expensive, so we only do it for the top level query value.
// (TODO: evaluate cost vs profit, consider higher thresholds)
static cl::opt<unsigned> DomConditionsMaxDepth("dom-conditions-max-depth",
                                               cl::Hidden, cl::init(1));

/// How many dominating blocks should be scanned looking for dominating
/// conditions?
static cl::opt<unsigned> DomConditionsMaxDomBlocks("dom-conditions-dom-blocks",
                                                   cl::Hidden,
                                                   cl::init(20000));

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(2000));

// If true, only consider compares whose sole use is a conditional branch.
static cl::opt<bool> DomConditionsSingleCmpUse("dom-conditions-single-cmp-use",
                                               cl::Hidden, cl::init(false));

/// Returns the bitwidth of the given scalar or pointer type (returns 0 if the
/// width is unknown). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

// Many of these functions have internal versions that take an assumption
// exclusion set. This is because of the potential for mutual recursion to
// cause computeKnownBits to repeatedly visit the same assume intrinsic.
The 84 // classic case of this is assume(x = y), which will attempt to determine 85 // bits in x from bits in y, which will attempt to determine bits in y from 86 // bits in x, etc. Regarding the mutual recursion, computeKnownBits can call 87 // isKnownNonZero, which calls computeKnownBits and ComputeSignBit and 88 // isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so on. 89 typedef SmallPtrSet<const Value *, 8> ExclInvsSet; 90 91 namespace { 92 // Simplifying using an assume can only be done in a particular control-flow 93 // context (the context instruction provides that context). If an assume and 94 // the context instruction are not in the same block then the DT helps in 95 // figuring out if we can use it. 96 struct Query { 97 ExclInvsSet ExclInvs; 98 AssumptionCache *AC; 99 const Instruction *CxtI; 100 const DominatorTree *DT; 101 102 Query(AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr, 103 const DominatorTree *DT = nullptr) 104 : AC(AC), CxtI(CxtI), DT(DT) {} 105 106 Query(const Query &Q, const Value *NewExcl) 107 : ExclInvs(Q.ExclInvs), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT) { 108 ExclInvs.insert(NewExcl); 109 } 110 }; 111 } // end anonymous namespace 112 113 // Given the provided Value and, potentially, a context instruction, return 114 // the preferred context instruction (if any). 115 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) { 116 // If we've been provided with a context instruction, then use that (provided 117 // it has been inserted). 118 if (CxtI && CxtI->getParent()) 119 return CxtI; 120 121 // If the value is really an already-inserted instruction, then use that. 122 CxtI = dyn_cast<Instruction>(V); 123 if (CxtI && CxtI->getParent()) 124 return CxtI; 125 126 return nullptr; 127 } 128 129 static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne, 130 const DataLayout &DL, unsigned Depth, 131 const Query &Q); 132 133 void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne, 134 const DataLayout &DL, unsigned Depth, 135 AssumptionCache *AC, const Instruction *CxtI, 136 const DominatorTree *DT) { 137 ::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, 138 Query(AC, safeCxtI(V, CxtI), DT)); 139 } 140 141 bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL, 142 AssumptionCache *AC, const Instruction *CxtI, 143 const DominatorTree *DT) { 144 assert(LHS->getType() == RHS->getType() && 145 "LHS and RHS should have the same type"); 146 assert(LHS->getType()->isIntOrIntVectorTy() && 147 "LHS and RHS should be integers"); 148 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType()); 149 APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0); 150 APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0); 151 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT); 152 computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT); 153 return (LHSKnownZero | RHSKnownZero).isAllOnesValue(); 154 } 155 156 static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, 157 const DataLayout &DL, unsigned Depth, 158 const Query &Q); 159 160 void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, 161 const DataLayout &DL, unsigned Depth, 162 AssumptionCache *AC, const Instruction *CxtI, 163 const DominatorTree *DT) { 164 ::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, 165 Query(AC, safeCxtI(V, CxtI), DT)); 166 } 167 168 static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, 
unsigned Depth, 169 const Query &Q, const DataLayout &DL); 170 171 bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero, 172 unsigned Depth, AssumptionCache *AC, 173 const Instruction *CxtI, 174 const DominatorTree *DT) { 175 return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth, 176 Query(AC, safeCxtI(V, CxtI), DT), DL); 177 } 178 179 static bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth, 180 const Query &Q); 181 182 bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth, 183 AssumptionCache *AC, const Instruction *CxtI, 184 const DominatorTree *DT) { 185 return ::isKnownNonZero(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT)); 186 } 187 188 static bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL, 189 unsigned Depth, const Query &Q); 190 191 bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL, 192 unsigned Depth, AssumptionCache *AC, 193 const Instruction *CxtI, const DominatorTree *DT) { 194 return ::MaskedValueIsZero(V, Mask, DL, Depth, 195 Query(AC, safeCxtI(V, CxtI), DT)); 196 } 197 198 static unsigned ComputeNumSignBits(Value *V, const DataLayout &DL, 199 unsigned Depth, const Query &Q); 200 201 unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL, 202 unsigned Depth, AssumptionCache *AC, 203 const Instruction *CxtI, 204 const DominatorTree *DT) { 205 return ::ComputeNumSignBits(V, DL, Depth, Query(AC, safeCxtI(V, CxtI), DT)); 206 } 207 208 static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW, 209 APInt &KnownZero, APInt &KnownOne, 210 APInt &KnownZero2, APInt &KnownOne2, 211 const DataLayout &DL, unsigned Depth, 212 const Query &Q) { 213 if (!Add) { 214 if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) { 215 // We know that the top bits of C-X are clear if X contains less bits 216 // than C (i.e. no wrap-around can happen). For example, 20-X is 217 // positive if we can prove that X is >= 0 and < 16. 218 if (!CLHS->getValue().isNegative()) { 219 unsigned BitWidth = KnownZero.getBitWidth(); 220 unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros(); 221 // NLZ can't be BitWidth with no sign bit 222 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1); 223 computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q); 224 225 // If all of the MaskV bits are known to be zero, then we know the 226 // output top bits are zero, because we now know that the output is 227 // from [0-C]. 228 if ((KnownZero2 & MaskV) == MaskV) { 229 unsigned NLZ2 = CLHS->getValue().countLeadingZeros(); 230 // Top bits known zero. 231 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2); 232 } 233 } 234 } 235 } 236 237 unsigned BitWidth = KnownZero.getBitWidth(); 238 239 // If an initial sequence of bits in the result is not needed, the 240 // corresponding bits in the operands are not needed. 241 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0); 242 computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, DL, Depth + 1, Q); 243 computeKnownBits(Op1, KnownZero2, KnownOne2, DL, Depth + 1, Q); 244 245 // Carry in a 1 for a subtract, rather than a 0. 246 APInt CarryIn(BitWidth, 0); 247 if (!Add) { 248 // Sum = LHS + ~RHS + 1 249 std::swap(KnownZero2, KnownOne2); 250 CarryIn.setBit(0); 251 } 252 253 APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn; 254 APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn; 255 256 // Compute known bits of the carry. 
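  // Each bit of a sum is A ^ B ^ CarryIn, so the carry into a bit position
  // can be recovered as Sum ^ A ^ B. Doing this for the minimal possible sum
  // (PossibleSumOne, only the known-one bits set) and the maximal possible
  // sum (PossibleSumZero, every bit not known zero set) bounds the carries
  // from below and above; bits where the two bounds agree are known carries.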
257 APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2); 258 APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2; 259 260 // Compute set of known bits (where all three relevant bits are known). 261 APInt LHSKnown = LHSKnownZero | LHSKnownOne; 262 APInt RHSKnown = KnownZero2 | KnownOne2; 263 APInt CarryKnown = CarryKnownZero | CarryKnownOne; 264 APInt Known = LHSKnown & RHSKnown & CarryKnown; 265 266 assert((PossibleSumZero & Known) == (PossibleSumOne & Known) && 267 "known bits of sum differ"); 268 269 // Compute known bits of the result. 270 KnownZero = ~PossibleSumOne & Known; 271 KnownOne = PossibleSumOne & Known; 272 273 // Are we still trying to solve for the sign bit? 274 if (!Known.isNegative()) { 275 if (NSW) { 276 // Adding two non-negative numbers, or subtracting a negative number from 277 // a non-negative one, can't wrap into negative. 278 if (LHSKnownZero.isNegative() && KnownZero2.isNegative()) 279 KnownZero |= APInt::getSignBit(BitWidth); 280 // Adding two negative numbers, or subtracting a non-negative number from 281 // a negative one, can't wrap into non-negative. 282 else if (LHSKnownOne.isNegative() && KnownOne2.isNegative()) 283 KnownOne |= APInt::getSignBit(BitWidth); 284 } 285 } 286 } 287 288 static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW, 289 APInt &KnownZero, APInt &KnownOne, 290 APInt &KnownZero2, APInt &KnownOne2, 291 const DataLayout &DL, unsigned Depth, 292 const Query &Q) { 293 unsigned BitWidth = KnownZero.getBitWidth(); 294 computeKnownBits(Op1, KnownZero, KnownOne, DL, Depth + 1, Q); 295 computeKnownBits(Op0, KnownZero2, KnownOne2, DL, Depth + 1, Q); 296 297 bool isKnownNegative = false; 298 bool isKnownNonNegative = false; 299 // If the multiplication is known not to overflow, compute the sign bit. 300 if (NSW) { 301 if (Op0 == Op1) { 302 // The product of a number with itself is non-negative. 303 isKnownNonNegative = true; 304 } else { 305 bool isKnownNonNegativeOp1 = KnownZero.isNegative(); 306 bool isKnownNonNegativeOp0 = KnownZero2.isNegative(); 307 bool isKnownNegativeOp1 = KnownOne.isNegative(); 308 bool isKnownNegativeOp0 = KnownOne2.isNegative(); 309 // The product of two numbers with the same sign is non-negative. 310 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) || 311 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0); 312 // The product of a negative number and a non-negative number is either 313 // negative or zero. 314 if (!isKnownNonNegative) 315 isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 && 316 isKnownNonZero(Op0, DL, Depth, Q)) || 317 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && 318 isKnownNonZero(Op1, DL, Depth, Q)); 319 } 320 } 321 322 // If low bits are zero in either operand, output low known-0 bits. 323 // Also compute a conserative estimate for high known-0 bits. 324 // More trickiness is possible, but this is sufficient for the 325 // interesting case of alignment computation. 326 KnownOne.clearAllBits(); 327 unsigned TrailZ = KnownZero.countTrailingOnes() + 328 KnownZero2.countTrailingOnes(); 329 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() + 330 KnownZero2.countLeadingOnes(), 331 BitWidth) - BitWidth; 332 333 TrailZ = std::min(TrailZ, BitWidth); 334 LeadZ = std::min(LeadZ, BitWidth); 335 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) | 336 APInt::getHighBitsSet(BitWidth, LeadZ); 337 338 // Only make use of no-wrap flags if we failed to compute the sign bit 339 // directly. 
This matters if the multiplication always overflows, in 340 // which case we prefer to follow the result of the direct computation, 341 // though as the program is invoking undefined behaviour we can choose 342 // whatever we like here. 343 if (isKnownNonNegative && !KnownOne.isNegative()) 344 KnownZero.setBit(BitWidth - 1); 345 else if (isKnownNegative && !KnownZero.isNegative()) 346 KnownOne.setBit(BitWidth - 1); 347 } 348 349 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges, 350 APInt &KnownZero) { 351 unsigned BitWidth = KnownZero.getBitWidth(); 352 unsigned NumRanges = Ranges.getNumOperands() / 2; 353 assert(NumRanges >= 1); 354 355 // Use the high end of the ranges to find leading zeros. 356 unsigned MinLeadingZeros = BitWidth; 357 for (unsigned i = 0; i < NumRanges; ++i) { 358 ConstantInt *Lower = 359 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0)); 360 ConstantInt *Upper = 361 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1)); 362 ConstantRange Range(Lower->getValue(), Upper->getValue()); 363 if (Range.isWrappedSet()) 364 MinLeadingZeros = 0; // -1 has no zeros 365 unsigned LeadingZeros = (Upper->getValue() - 1).countLeadingZeros(); 366 MinLeadingZeros = std::min(LeadingZeros, MinLeadingZeros); 367 } 368 369 KnownZero = APInt::getHighBitsSet(BitWidth, MinLeadingZeros); 370 } 371 372 static bool isEphemeralValueOf(Instruction *I, const Value *E) { 373 SmallVector<const Value *, 16> WorkSet(1, I); 374 SmallPtrSet<const Value *, 32> Visited; 375 SmallPtrSet<const Value *, 16> EphValues; 376 377 while (!WorkSet.empty()) { 378 const Value *V = WorkSet.pop_back_val(); 379 if (!Visited.insert(V).second) 380 continue; 381 382 // If all uses of this value are ephemeral, then so is this value. 383 bool FoundNEUse = false; 384 for (const User *I : V->users()) 385 if (!EphValues.count(I)) { 386 FoundNEUse = true; 387 break; 388 } 389 390 if (!FoundNEUse) { 391 if (V == E) 392 return true; 393 394 EphValues.insert(V); 395 if (const User *U = dyn_cast<User>(V)) 396 for (User::const_op_iterator J = U->op_begin(), JE = U->op_end(); 397 J != JE; ++J) { 398 if (isSafeToSpeculativelyExecute(*J)) 399 WorkSet.push_back(*J); 400 } 401 } 402 } 403 404 return false; 405 } 406 407 // Is this an intrinsic that cannot be speculated but also cannot trap? 408 static bool isAssumeLikeIntrinsic(const Instruction *I) { 409 if (const CallInst *CI = dyn_cast<CallInst>(I)) 410 if (Function *F = CI->getCalledFunction()) 411 switch (F->getIntrinsicID()) { 412 default: break; 413 // FIXME: This list is repeated from NoTTI::getIntrinsicCost. 414 case Intrinsic::assume: 415 case Intrinsic::dbg_declare: 416 case Intrinsic::dbg_value: 417 case Intrinsic::invariant_start: 418 case Intrinsic::invariant_end: 419 case Intrinsic::lifetime_start: 420 case Intrinsic::lifetime_end: 421 case Intrinsic::objectsize: 422 case Intrinsic::ptr_annotation: 423 case Intrinsic::var_annotation: 424 return true; 425 } 426 427 return false; 428 } 429 430 static bool isValidAssumeForContext(Value *V, const Query &Q) { 431 Instruction *Inv = cast<Instruction>(V); 432 433 // There are two restrictions on the use of an assume: 434 // 1. The assume must dominate the context (or the control flow must 435 // reach the assume whenever it reaches the context). 436 // 2. The context must not be in the assume's set of ephemeral values 437 // (otherwise we will use the assume to prove that the condition 438 // feeding the assume is trivially true, thus causing the removal of 439 // the assume). 
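  // With a dominator tree the first restriction can be checked precisely;
  // without one we fall back to the limited local search further below.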
440 441 if (Q.DT) { 442 if (Q.DT->dominates(Inv, Q.CxtI)) { 443 return true; 444 } else if (Inv->getParent() == Q.CxtI->getParent()) { 445 // The context comes first, but they're both in the same block. Make sure 446 // there is nothing in between that might interrupt the control flow. 447 for (BasicBlock::const_iterator I = 448 std::next(BasicBlock::const_iterator(Q.CxtI)), 449 IE(Inv); I != IE; ++I) 450 if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I)) 451 return false; 452 453 return !isEphemeralValueOf(Inv, Q.CxtI); 454 } 455 456 return false; 457 } 458 459 // When we don't have a DT, we do a limited search... 460 if (Inv->getParent() == Q.CxtI->getParent()->getSinglePredecessor()) { 461 return true; 462 } else if (Inv->getParent() == Q.CxtI->getParent()) { 463 // Search forward from the assume until we reach the context (or the end 464 // of the block); the common case is that the assume will come first. 465 for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)), 466 IE = Inv->getParent()->end(); I != IE; ++I) 467 if (I == Q.CxtI) 468 return true; 469 470 // The context must come first... 471 for (BasicBlock::const_iterator I = 472 std::next(BasicBlock::const_iterator(Q.CxtI)), 473 IE(Inv); I != IE; ++I) 474 if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I)) 475 return false; 476 477 return !isEphemeralValueOf(Inv, Q.CxtI); 478 } 479 480 return false; 481 } 482 483 bool llvm::isValidAssumeForContext(const Instruction *I, 484 const Instruction *CxtI, 485 const DominatorTree *DT) { 486 return ::isValidAssumeForContext(const_cast<Instruction *>(I), 487 Query(nullptr, CxtI, DT)); 488 } 489 490 template<typename LHS, typename RHS> 491 inline match_combine_or<CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>, 492 CmpClass_match<RHS, LHS, ICmpInst, ICmpInst::Predicate>> 493 m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) { 494 return m_CombineOr(m_ICmp(Pred, L, R), m_ICmp(Pred, R, L)); 495 } 496 497 template<typename LHS, typename RHS> 498 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::And>, 499 BinaryOp_match<RHS, LHS, Instruction::And>> 500 m_c_And(const LHS &L, const RHS &R) { 501 return m_CombineOr(m_And(L, R), m_And(R, L)); 502 } 503 504 template<typename LHS, typename RHS> 505 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Or>, 506 BinaryOp_match<RHS, LHS, Instruction::Or>> 507 m_c_Or(const LHS &L, const RHS &R) { 508 return m_CombineOr(m_Or(L, R), m_Or(R, L)); 509 } 510 511 template<typename LHS, typename RHS> 512 inline match_combine_or<BinaryOp_match<LHS, RHS, Instruction::Xor>, 513 BinaryOp_match<RHS, LHS, Instruction::Xor>> 514 m_c_Xor(const LHS &L, const RHS &R) { 515 return m_CombineOr(m_Xor(L, R), m_Xor(R, L)); 516 } 517 518 /// Compute known bits in 'V' under the assumption that the condition 'Cmp' is 519 /// true (at the context instruction.) This is mostly a utility function for 520 /// the prototype dominating conditions reasoning below. 521 static void computeKnownBitsFromTrueCondition(Value *V, ICmpInst *Cmp, 522 APInt &KnownZero, 523 APInt &KnownOne, 524 const DataLayout &DL, 525 unsigned Depth, const Query &Q) { 526 Value *LHS = Cmp->getOperand(0); 527 Value *RHS = Cmp->getOperand(1); 528 // TODO: We could potentially be more aggressive here. This would be worth 529 // evaluating. If we can, explore commoning this code with the assume 530 // handling logic. 
  if (LHS != V && RHS != V)
    return;

  const unsigned BitWidth = KnownZero.getBitWidth();

  switch (Cmp->getPredicate()) {
  default:
    // We know nothing from this condition
    break;
  // TODO: implement unsigned bound from below (known one bits)
  // TODO: common condition check implementations with assumes
  // TODO: implement other patterns from assume (e.g. V & B == A)
  case ICmpInst::ICMP_SGT:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      if (KnownOneTemp.isAllOnesValue() || KnownZeroTemp.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    }
    break;
  case ICmpInst::ICMP_EQ:
    if (LHS == V)
      computeKnownBits(RHS, KnownZero, KnownOne, DL, Depth + 1, Q);
    else if (RHS == V)
      computeKnownBits(LHS, KnownZero, KnownOne, DL, Depth + 1, Q);
    else
      llvm_unreachable("missing use?");
    break;
  case ICmpInst::ICMP_ULE:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      // The known zero bits carry over
      unsigned SignBits = KnownZeroTemp.countLeadingOnes();
      KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
    }
    break;
  case ICmpInst::ICMP_ULT:
    if (LHS == V) {
      APInt KnownZeroTemp(BitWidth, 0), KnownOneTemp(BitWidth, 0);
      computeKnownBits(RHS, KnownZeroTemp, KnownOneTemp, DL, Depth + 1, Q);
      // Whatever high bits in rhs are zero are known to be zero (if rhs is a
      // power of 2, then one more).
      unsigned SignBits = KnownZeroTemp.countLeadingOnes();
      if (isKnownToBeAPowerOfTwo(RHS, false, Depth + 1, Query(Q, Cmp), DL))
        SignBits++;
      KnownZero |= APInt::getHighBitsSet(BitWidth, SignBits);
    }
    break;
  };
}

/// Compute known bits in 'V' from conditions which are known to be true along
/// all paths leading to the context instruction. In particular, look for
/// cases where one branch of an interesting condition dominates the context
/// instruction. This does not do general dataflow.
/// NOTE: This code is EXPERIMENTAL and currently off by default.
static void computeKnownBitsFromDominatingCondition(Value *V, APInt &KnownZero,
                                                    APInt &KnownOne,
                                                    const DataLayout &DL,
                                                    unsigned Depth,
                                                    const Query &Q) {
  // Need both the dominator tree and the query location to do anything useful
  if (!Q.DT || !Q.CxtI)
    return;
  Instruction *Cxt = const_cast<Instruction *>(Q.CxtI);

  // Avoid useless work
  if (auto VI = dyn_cast<Instruction>(V))
    if (VI->getParent() == Cxt->getParent())
      return;

  // Note: We currently implement two options. It's not clear which of these
  // will survive long term, we need data for that.
  // Option 1 - Try walking the dominator tree looking for conditions which
  // might apply. This works well for local conditions (loop guards, etc..),
  // but not as well for things far from the context instruction (presuming a
  // low max blocks explored). If we can set a high enough limit, this would
  // be all we need.
  // Option 2 - We restrict our search to those conditions which are uses of
  // the value we're interested in. This is independent of dom structure,
  // but is slightly less powerful without looking through lots of use chains.
615 // It does handle conditions far from the context instruction (e.g. early 616 // function exits on entry) really well though. 617 618 // Option 1 - Search the dom tree 619 unsigned NumBlocksExplored = 0; 620 BasicBlock *Current = Cxt->getParent(); 621 while (true) { 622 // Stop searching if we've gone too far up the chain 623 if (NumBlocksExplored >= DomConditionsMaxDomBlocks) 624 break; 625 NumBlocksExplored++; 626 627 if (!Q.DT->getNode(Current)->getIDom()) 628 break; 629 Current = Q.DT->getNode(Current)->getIDom()->getBlock(); 630 if (!Current) 631 // found function entry 632 break; 633 634 BranchInst *BI = dyn_cast<BranchInst>(Current->getTerminator()); 635 if (!BI || BI->isUnconditional()) 636 continue; 637 ICmpInst *Cmp = dyn_cast<ICmpInst>(BI->getCondition()); 638 if (!Cmp) 639 continue; 640 641 // We're looking for conditions that are guaranteed to hold at the context 642 // instruction. Finding a condition where one path dominates the context 643 // isn't enough because both the true and false cases could merge before 644 // the context instruction we're actually interested in. Instead, we need 645 // to ensure that the taken *edge* dominates the context instruction. 646 BasicBlock *BB0 = BI->getSuccessor(0); 647 BasicBlockEdge Edge(BI->getParent(), BB0); 648 if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent())) 649 continue; 650 651 computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth, 652 Q); 653 } 654 655 // Option 2 - Search the other uses of V 656 unsigned NumUsesExplored = 0; 657 for (auto U : V->users()) { 658 // Avoid massive lists 659 if (NumUsesExplored >= DomConditionsMaxUses) 660 break; 661 NumUsesExplored++; 662 // Consider only compare instructions uniquely controlling a branch 663 ICmpInst *Cmp = dyn_cast<ICmpInst>(U); 664 if (!Cmp) 665 continue; 666 667 if (DomConditionsSingleCmpUse && !Cmp->hasOneUse()) 668 continue; 669 670 for (auto *CmpU : Cmp->users()) { 671 BranchInst *BI = dyn_cast<BranchInst>(CmpU); 672 if (!BI || BI->isUnconditional()) 673 continue; 674 // We're looking for conditions that are guaranteed to hold at the 675 // context instruction. Finding a condition where one path dominates 676 // the context isn't enough because both the true and false cases could 677 // merge before the context instruction we're actually interested in. 678 // Instead, we need to ensure that the taken *edge* dominates the context 679 // instruction. 680 BasicBlock *BB0 = BI->getSuccessor(0); 681 BasicBlockEdge Edge(BI->getParent(), BB0); 682 if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent())) 683 continue; 684 685 computeKnownBitsFromTrueCondition(V, Cmp, KnownZero, KnownOne, DL, Depth, 686 Q); 687 } 688 } 689 } 690 691 static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero, 692 APInt &KnownOne, const DataLayout &DL, 693 unsigned Depth, const Query &Q) { 694 // Use of assumptions is context-sensitive. If we don't have a context, we 695 // cannot use them! 696 if (!Q.AC || !Q.CxtI) 697 return; 698 699 unsigned BitWidth = KnownZero.getBitWidth(); 700 701 for (auto &AssumeVH : Q.AC->assumptions()) { 702 if (!AssumeVH) 703 continue; 704 CallInst *I = cast<CallInst>(AssumeVH); 705 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() && 706 "Got assumption for the wrong function!"); 707 if (Q.ExclInvs.count(I)) 708 continue; 709 710 // Warning: This loop can end up being somewhat performance sensetive. 
711 // We're running this loop for once for each value queried resulting in a 712 // runtime of ~O(#assumes * #values). 713 714 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && 715 "must be an assume intrinsic"); 716 717 Value *Arg = I->getArgOperand(0); 718 719 if (Arg == V && isValidAssumeForContext(I, Q)) { 720 assert(BitWidth == 1 && "assume operand is not i1?"); 721 KnownZero.clearAllBits(); 722 KnownOne.setAllBits(); 723 return; 724 } 725 726 // The remaining tests are all recursive, so bail out if we hit the limit. 727 if (Depth == MaxDepth) 728 continue; 729 730 Value *A, *B; 731 auto m_V = m_CombineOr(m_Specific(V), 732 m_CombineOr(m_PtrToInt(m_Specific(V)), 733 m_BitCast(m_Specific(V)))); 734 735 CmpInst::Predicate Pred; 736 ConstantInt *C; 737 // assume(v = a) 738 if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) && 739 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 740 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 741 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 742 KnownZero |= RHSKnownZero; 743 KnownOne |= RHSKnownOne; 744 // assume(v & b = a) 745 } else if (match(Arg, 746 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) && 747 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 748 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 749 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 750 APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0); 751 computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I)); 752 753 // For those bits in the mask that are known to be one, we can propagate 754 // known bits from the RHS to V. 755 KnownZero |= RHSKnownZero & MaskKnownOne; 756 KnownOne |= RHSKnownOne & MaskKnownOne; 757 // assume(~(v & b) = a) 758 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))), 759 m_Value(A))) && 760 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 761 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 762 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 763 APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0); 764 computeKnownBits(B, MaskKnownZero, MaskKnownOne, DL, Depth+1, Query(Q, I)); 765 766 // For those bits in the mask that are known to be one, we can propagate 767 // inverted known bits from the RHS to V. 768 KnownZero |= RHSKnownOne & MaskKnownOne; 769 KnownOne |= RHSKnownZero & MaskKnownOne; 770 // assume(v | b = a) 771 } else if (match(Arg, 772 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) && 773 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 774 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 775 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 776 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); 777 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); 778 779 // For those bits in B that are known to be zero, we can propagate known 780 // bits from the RHS to V. 
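      // (Wherever a bit of b is known zero, v | b and v agree in that bit, so
      // that bit of v must equal the corresponding bit of a.)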
781 KnownZero |= RHSKnownZero & BKnownZero; 782 KnownOne |= RHSKnownOne & BKnownZero; 783 // assume(~(v | b) = a) 784 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))), 785 m_Value(A))) && 786 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 787 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 788 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 789 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); 790 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); 791 792 // For those bits in B that are known to be zero, we can propagate 793 // inverted known bits from the RHS to V. 794 KnownZero |= RHSKnownOne & BKnownZero; 795 KnownOne |= RHSKnownZero & BKnownZero; 796 // assume(v ^ b = a) 797 } else if (match(Arg, 798 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) && 799 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 800 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 801 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 802 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); 803 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); 804 805 // For those bits in B that are known to be zero, we can propagate known 806 // bits from the RHS to V. For those bits in B that are known to be one, 807 // we can propagate inverted known bits from the RHS to V. 808 KnownZero |= RHSKnownZero & BKnownZero; 809 KnownOne |= RHSKnownOne & BKnownZero; 810 KnownZero |= RHSKnownOne & BKnownOne; 811 KnownOne |= RHSKnownZero & BKnownOne; 812 // assume(~(v ^ b) = a) 813 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))), 814 m_Value(A))) && 815 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 816 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 817 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 818 APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0); 819 computeKnownBits(B, BKnownZero, BKnownOne, DL, Depth+1, Query(Q, I)); 820 821 // For those bits in B that are known to be zero, we can propagate 822 // inverted known bits from the RHS to V. For those bits in B that are 823 // known to be one, we can propagate known bits from the RHS to V. 824 KnownZero |= RHSKnownOne & BKnownZero; 825 KnownOne |= RHSKnownZero & BKnownZero; 826 KnownZero |= RHSKnownZero & BKnownOne; 827 KnownOne |= RHSKnownOne & BKnownOne; 828 // assume(v << c = a) 829 } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)), 830 m_Value(A))) && 831 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 832 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 833 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 834 // For those bits in RHS that are known, we can propagate them to known 835 // bits in V shifted to the right by C. 836 KnownZero |= RHSKnownZero.lshr(C->getZExtValue()); 837 KnownOne |= RHSKnownOne.lshr(C->getZExtValue()); 838 // assume(~(v << c) = a) 839 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))), 840 m_Value(A))) && 841 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 842 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 843 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 844 // For those bits in RHS that are known, we can propagate them inverted 845 // to known bits in V shifted to the right by C. 
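      // (Here v << c equals ~a, so every bit of v that is not shifted out is
      // the complement of the corresponding higher bit of a.)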
846 KnownZero |= RHSKnownOne.lshr(C->getZExtValue()); 847 KnownOne |= RHSKnownZero.lshr(C->getZExtValue()); 848 // assume(v >> c = a) 849 } else if (match(Arg, 850 m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)), 851 m_AShr(m_V, m_ConstantInt(C))), 852 m_Value(A))) && 853 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 854 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 855 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 856 // For those bits in RHS that are known, we can propagate them to known 857 // bits in V shifted to the right by C. 858 KnownZero |= RHSKnownZero << C->getZExtValue(); 859 KnownOne |= RHSKnownOne << C->getZExtValue(); 860 // assume(~(v >> c) = a) 861 } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr( 862 m_LShr(m_V, m_ConstantInt(C)), 863 m_AShr(m_V, m_ConstantInt(C)))), 864 m_Value(A))) && 865 Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q)) { 866 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 867 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 868 // For those bits in RHS that are known, we can propagate them inverted 869 // to known bits in V shifted to the right by C. 870 KnownZero |= RHSKnownOne << C->getZExtValue(); 871 KnownOne |= RHSKnownZero << C->getZExtValue(); 872 // assume(v >=_s c) where c is non-negative 873 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 874 Pred == ICmpInst::ICMP_SGE && isValidAssumeForContext(I, Q)) { 875 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 876 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 877 878 if (RHSKnownZero.isNegative()) { 879 // We know that the sign bit is zero. 880 KnownZero |= APInt::getSignBit(BitWidth); 881 } 882 // assume(v >_s c) where c is at least -1. 883 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 884 Pred == ICmpInst::ICMP_SGT && isValidAssumeForContext(I, Q)) { 885 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 886 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 887 888 if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) { 889 // We know that the sign bit is zero. 890 KnownZero |= APInt::getSignBit(BitWidth); 891 } 892 // assume(v <=_s c) where c is negative 893 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 894 Pred == ICmpInst::ICMP_SLE && isValidAssumeForContext(I, Q)) { 895 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 896 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 897 898 if (RHSKnownOne.isNegative()) { 899 // We know that the sign bit is one. 900 KnownOne |= APInt::getSignBit(BitWidth); 901 } 902 // assume(v <_s c) where c is non-positive 903 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 904 Pred == ICmpInst::ICMP_SLT && isValidAssumeForContext(I, Q)) { 905 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 906 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 907 908 if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) { 909 // We know that the sign bit is one. 910 KnownOne |= APInt::getSignBit(BitWidth); 911 } 912 // assume(v <=_u c) 913 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 914 Pred == ICmpInst::ICMP_ULE && isValidAssumeForContext(I, Q)) { 915 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 916 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 917 918 // Whatever high bits in c are zero are known to be zero. 
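      // E.g. for an i32 value, assume(v <= 15) makes the top 28 bits of v
      // known zero.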
919 KnownZero |= 920 APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()); 921 // assume(v <_u c) 922 } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) && 923 Pred == ICmpInst::ICMP_ULT && isValidAssumeForContext(I, Q)) { 924 APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0); 925 computeKnownBits(A, RHSKnownZero, RHSKnownOne, DL, Depth+1, Query(Q, I)); 926 927 // Whatever high bits in c are zero are known to be zero (if c is a power 928 // of 2, then one more). 929 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I), DL)) 930 KnownZero |= 931 APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1); 932 else 933 KnownZero |= 934 APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()); 935 } 936 } 937 } 938 939 /// Determine which bits of V are known to be either zero or one and return 940 /// them in the KnownZero/KnownOne bit sets. 941 /// 942 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that 943 /// we cannot optimize based on the assumption that it is zero without changing 944 /// it to be an explicit zero. If we don't change it to zero, other code could 945 /// optimized based on the contradictory assumption that it is non-zero. 946 /// Because instcombine aggressively folds operations with undef args anyway, 947 /// this won't lose us code quality. 948 /// 949 /// This function is defined on values with integer type, values with pointer 950 /// type, and vectors of integers. In the case 951 /// where V is a vector, known zero, and known one values are the 952 /// same width as the vector element, and the bit is set only if it is true 953 /// for all of the elements in the vector. 954 void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne, 955 const DataLayout &DL, unsigned Depth, const Query &Q) { 956 assert(V && "No Value?"); 957 assert(Depth <= MaxDepth && "Limit Search Depth"); 958 unsigned BitWidth = KnownZero.getBitWidth(); 959 960 assert((V->getType()->isIntOrIntVectorTy() || 961 V->getType()->getScalarType()->isPointerTy()) && 962 "Not integer or pointer type!"); 963 assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) && 964 (!V->getType()->isIntOrIntVectorTy() || 965 V->getType()->getScalarSizeInBits() == BitWidth) && 966 KnownZero.getBitWidth() == BitWidth && 967 KnownOne.getBitWidth() == BitWidth && 968 "V, KnownOne and KnownZero should have same BitWidth"); 969 970 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 971 // We know all of the bits for a constant! 972 KnownOne = CI->getValue(); 973 KnownZero = ~KnownOne; 974 return; 975 } 976 // Null and aggregate-zero are all-zeros. 977 if (isa<ConstantPointerNull>(V) || 978 isa<ConstantAggregateZero>(V)) { 979 KnownOne.clearAllBits(); 980 KnownZero = APInt::getAllOnesValue(BitWidth); 981 return; 982 } 983 // Handle a constant vector by taking the intersection of the known bits of 984 // each element. There is no real need to handle ConstantVector here, because 985 // we don't handle undef in any particularly useful way. 986 if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) { 987 // We know that CDS must be a vector of integers. Take the intersection of 988 // each element. 
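    // Start from "every bit known" and drop any claim that some element of
    // the vector contradicts; a bit that differs between elements ends up in
    // neither KnownZero nor KnownOne.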
989 KnownZero.setAllBits(); KnownOne.setAllBits(); 990 APInt Elt(KnownZero.getBitWidth(), 0); 991 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { 992 Elt = CDS->getElementAsInteger(i); 993 KnownZero &= ~Elt; 994 KnownOne &= Elt; 995 } 996 return; 997 } 998 999 // The address of an aligned GlobalValue has trailing zeros. 1000 if (auto *GO = dyn_cast<GlobalObject>(V)) { 1001 unsigned Align = GO->getAlignment(); 1002 if (Align == 0) { 1003 if (auto *GVar = dyn_cast<GlobalVariable>(GO)) { 1004 Type *ObjectType = GVar->getType()->getElementType(); 1005 if (ObjectType->isSized()) { 1006 // If the object is defined in the current Module, we'll be giving 1007 // it the preferred alignment. Otherwise, we have to assume that it 1008 // may only have the minimum ABI alignment. 1009 if (!GVar->isDeclaration() && !GVar->isWeakForLinker()) 1010 Align = DL.getPreferredAlignment(GVar); 1011 else 1012 Align = DL.getABITypeAlignment(ObjectType); 1013 } 1014 } 1015 } 1016 if (Align > 0) 1017 KnownZero = APInt::getLowBitsSet(BitWidth, 1018 countTrailingZeros(Align)); 1019 else 1020 KnownZero.clearAllBits(); 1021 KnownOne.clearAllBits(); 1022 return; 1023 } 1024 1025 if (Argument *A = dyn_cast<Argument>(V)) { 1026 unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0; 1027 1028 if (!Align && A->hasStructRetAttr()) { 1029 // An sret parameter has at least the ABI alignment of the return type. 1030 Type *EltTy = cast<PointerType>(A->getType())->getElementType(); 1031 if (EltTy->isSized()) 1032 Align = DL.getABITypeAlignment(EltTy); 1033 } 1034 1035 if (Align) 1036 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align)); 1037 else 1038 KnownZero.clearAllBits(); 1039 KnownOne.clearAllBits(); 1040 1041 // Don't give up yet... there might be an assumption that provides more 1042 // information... 1043 computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q); 1044 1045 // Or a dominating condition for that matter 1046 if (EnableDomConditions && Depth <= DomConditionsMaxDepth) 1047 computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, 1048 Depth, Q); 1049 return; 1050 } 1051 1052 // Start out not knowing anything. 1053 KnownZero.clearAllBits(); KnownOne.clearAllBits(); 1054 1055 // Limit search depth. 1056 // All recursive calls that increase depth must come after this. 1057 if (Depth == MaxDepth) 1058 return; 1059 1060 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has 1061 // the bits of its aliasee. 1062 if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 1063 if (!GA->mayBeOverridden()) 1064 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, DL, Depth + 1, Q); 1065 return; 1066 } 1067 1068 // Check whether a nearby assume intrinsic can determine some known bits. 1069 computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q); 1070 1071 // Check whether there's a dominating condition which implies something about 1072 // this value at the given context. 
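  // (This is the experimental path gated by -value-tracking-dom-conditions,
  // which is off by default.)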
1073 if (EnableDomConditions && Depth <= DomConditionsMaxDepth) 1074 computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL, Depth, 1075 Q); 1076 1077 Operator *I = dyn_cast<Operator>(V); 1078 if (!I) return; 1079 1080 APInt KnownZero2(KnownZero), KnownOne2(KnownOne); 1081 switch (I->getOpcode()) { 1082 default: break; 1083 case Instruction::Load: 1084 if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range)) 1085 computeKnownBitsFromRangeMetadata(*MD, KnownZero); 1086 break; 1087 case Instruction::And: { 1088 // If either the LHS or the RHS are Zero, the result is zero. 1089 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q); 1090 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q); 1091 1092 // Output known-1 bits are only known if set in both the LHS & RHS. 1093 KnownOne &= KnownOne2; 1094 // Output known-0 are known to be clear if zero in either the LHS | RHS. 1095 KnownZero |= KnownZero2; 1096 break; 1097 } 1098 case Instruction::Or: { 1099 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q); 1100 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q); 1101 1102 // Output known-0 bits are only known if clear in both the LHS & RHS. 1103 KnownZero &= KnownZero2; 1104 // Output known-1 are known to be set if set in either the LHS | RHS. 1105 KnownOne |= KnownOne2; 1106 break; 1107 } 1108 case Instruction::Xor: { 1109 computeKnownBits(I->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, Q); 1110 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q); 1111 1112 // Output known-0 bits are known if clear or set in both the LHS & RHS. 1113 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2); 1114 // Output known-1 are known to be set if set in only one of the LHS, RHS. 1115 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2); 1116 KnownZero = KnownZeroOut; 1117 break; 1118 } 1119 case Instruction::Mul: { 1120 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); 1121 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero, 1122 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q); 1123 break; 1124 } 1125 case Instruction::UDiv: { 1126 // For the purposes of computing leading zeros we can conservatively 1127 // treat a udiv as a logical right shift by the power of 2 known to 1128 // be less than the denominator. 1129 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, Q); 1130 unsigned LeadZ = KnownZero2.countLeadingOnes(); 1131 1132 KnownOne2.clearAllBits(); 1133 KnownZero2.clearAllBits(); 1134 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q); 1135 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros(); 1136 if (RHSUnknownLeadingOnes != BitWidth) 1137 LeadZ = std::min(BitWidth, 1138 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1); 1139 1140 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ); 1141 break; 1142 } 1143 case Instruction::Select: 1144 computeKnownBits(I->getOperand(2), KnownZero, KnownOne, DL, Depth + 1, Q); 1145 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q); 1146 1147 // Only known if known in both the LHS and RHS. 1148 KnownOne &= KnownOne2; 1149 KnownZero &= KnownZero2; 1150 break; 1151 case Instruction::FPTrunc: 1152 case Instruction::FPExt: 1153 case Instruction::FPToUI: 1154 case Instruction::FPToSI: 1155 case Instruction::SIToFP: 1156 case Instruction::UIToFP: 1157 break; // Can't work with floating point. 
1158 case Instruction::PtrToInt: 1159 case Instruction::IntToPtr: 1160 case Instruction::AddrSpaceCast: // Pointers could be different sizes. 1161 // FALL THROUGH and handle them the same as zext/trunc. 1162 case Instruction::ZExt: 1163 case Instruction::Trunc: { 1164 Type *SrcTy = I->getOperand(0)->getType(); 1165 1166 unsigned SrcBitWidth; 1167 // Note that we handle pointer operands here because of inttoptr/ptrtoint 1168 // which fall through here. 1169 SrcBitWidth = DL.getTypeSizeInBits(SrcTy->getScalarType()); 1170 1171 assert(SrcBitWidth && "SrcBitWidth can't be zero"); 1172 KnownZero = KnownZero.zextOrTrunc(SrcBitWidth); 1173 KnownOne = KnownOne.zextOrTrunc(SrcBitWidth); 1174 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); 1175 KnownZero = KnownZero.zextOrTrunc(BitWidth); 1176 KnownOne = KnownOne.zextOrTrunc(BitWidth); 1177 // Any top bits are known to be zero. 1178 if (BitWidth > SrcBitWidth) 1179 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth); 1180 break; 1181 } 1182 case Instruction::BitCast: { 1183 Type *SrcTy = I->getOperand(0)->getType(); 1184 if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) && 1185 // TODO: For now, not handling conversions like: 1186 // (bitcast i64 %x to <2 x i32>) 1187 !I->getType()->isVectorTy()) { 1188 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); 1189 break; 1190 } 1191 break; 1192 } 1193 case Instruction::SExt: { 1194 // Compute the bits in the result that are not present in the input. 1195 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits(); 1196 1197 KnownZero = KnownZero.trunc(SrcBitWidth); 1198 KnownOne = KnownOne.trunc(SrcBitWidth); 1199 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); 1200 KnownZero = KnownZero.zext(BitWidth); 1201 KnownOne = KnownOne.zext(BitWidth); 1202 1203 // If the sign bit of the input is known set or clear, then we know the 1204 // top bits of the result. 1205 if (KnownZero[SrcBitWidth-1]) // Input sign bit known zero 1206 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth); 1207 else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set 1208 KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth); 1209 break; 1210 } 1211 case Instruction::Shl: 1212 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0 1213 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { 1214 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth); 1215 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); 1216 KnownZero <<= ShiftAmt; 1217 KnownOne <<= ShiftAmt; 1218 KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0 1219 } 1220 break; 1221 case Instruction::LShr: 1222 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 1223 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { 1224 // Compute the new bits that are at the top now. 1225 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth); 1226 1227 // Unsigned shift right. 1228 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); 1229 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt); 1230 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt); 1231 // high bits known zero. 1232 KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt); 1233 } 1234 break; 1235 case Instruction::AShr: 1236 // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 1237 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) { 1238 // Compute the new bits that are at the top now. 
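      // The shift amount is clamped to BitWidth-1 so the sign-bit test below
      // stays in range; an ashr by BitWidth or more would be undefined anyway.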
1239 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1); 1240 1241 // Signed shift right. 1242 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); 1243 KnownZero = APIntOps::lshr(KnownZero, ShiftAmt); 1244 KnownOne = APIntOps::lshr(KnownOne, ShiftAmt); 1245 1246 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt)); 1247 if (KnownZero[BitWidth-ShiftAmt-1]) // New bits are known zero. 1248 KnownZero |= HighBits; 1249 else if (KnownOne[BitWidth-ShiftAmt-1]) // New bits are known one. 1250 KnownOne |= HighBits; 1251 } 1252 break; 1253 case Instruction::Sub: { 1254 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); 1255 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, 1256 KnownZero, KnownOne, KnownZero2, KnownOne2, DL, 1257 Depth, Q); 1258 break; 1259 } 1260 case Instruction::Add: { 1261 bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap(); 1262 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, 1263 KnownZero, KnownOne, KnownZero2, KnownOne2, DL, 1264 Depth, Q); 1265 break; 1266 } 1267 case Instruction::SRem: 1268 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) { 1269 APInt RA = Rem->getValue().abs(); 1270 if (RA.isPowerOf2()) { 1271 APInt LowBits = RA - 1; 1272 computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL, Depth + 1, 1273 Q); 1274 1275 // The low bits of the first operand are unchanged by the srem. 1276 KnownZero = KnownZero2 & LowBits; 1277 KnownOne = KnownOne2 & LowBits; 1278 1279 // If the first operand is non-negative or has all low bits zero, then 1280 // the upper bits are all zero. 1281 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits)) 1282 KnownZero |= ~LowBits; 1283 1284 // If the first operand is negative and not all low bits are zero, then 1285 // the upper bits are all one. 1286 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0)) 1287 KnownOne |= ~LowBits; 1288 1289 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1290 } 1291 } 1292 1293 // The sign bit is the LHS's sign bit, except when the result of the 1294 // remainder is zero. 1295 if (KnownZero.isNonNegative()) { 1296 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0); 1297 computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, DL, 1298 Depth + 1, Q); 1299 // If it's known zero, our sign bit is also zero. 1300 if (LHSKnownZero.isNegative()) 1301 KnownZero.setBit(BitWidth - 1); 1302 } 1303 1304 break; 1305 case Instruction::URem: { 1306 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) { 1307 APInt RA = Rem->getValue(); 1308 if (RA.isPowerOf2()) { 1309 APInt LowBits = (RA - 1); 1310 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, 1311 Q); 1312 KnownZero |= ~LowBits; 1313 KnownOne &= LowBits; 1314 break; 1315 } 1316 } 1317 1318 // Since the result is less than or equal to either operand, any leading 1319 // zero bits in either operand must also exist in the result. 
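    // For example, if either operand is known to fit in 16 bits, then so does
    // the result of the urem.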
1320 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, Q); 1321 computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, DL, Depth + 1, Q); 1322 1323 unsigned Leaders = std::max(KnownZero.countLeadingOnes(), 1324 KnownZero2.countLeadingOnes()); 1325 KnownOne.clearAllBits(); 1326 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders); 1327 break; 1328 } 1329 1330 case Instruction::Alloca: { 1331 AllocaInst *AI = cast<AllocaInst>(V); 1332 unsigned Align = AI->getAlignment(); 1333 if (Align == 0) 1334 Align = DL.getABITypeAlignment(AI->getType()->getElementType()); 1335 1336 if (Align > 0) 1337 KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align)); 1338 break; 1339 } 1340 case Instruction::GetElementPtr: { 1341 // Analyze all of the subscripts of this getelementptr instruction 1342 // to determine if we can prove known low zero bits. 1343 APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0); 1344 computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, DL, 1345 Depth + 1, Q); 1346 unsigned TrailZ = LocalKnownZero.countTrailingOnes(); 1347 1348 gep_type_iterator GTI = gep_type_begin(I); 1349 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) { 1350 Value *Index = I->getOperand(i); 1351 if (StructType *STy = dyn_cast<StructType>(*GTI)) { 1352 // Handle struct member offset arithmetic. 1353 1354 // Handle case when index is vector zeroinitializer 1355 Constant *CIndex = cast<Constant>(Index); 1356 if (CIndex->isZeroValue()) 1357 continue; 1358 1359 if (CIndex->getType()->isVectorTy()) 1360 Index = CIndex->getSplatValue(); 1361 1362 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue(); 1363 const StructLayout *SL = DL.getStructLayout(STy); 1364 uint64_t Offset = SL->getElementOffset(Idx); 1365 TrailZ = std::min<unsigned>(TrailZ, 1366 countTrailingZeros(Offset)); 1367 } else { 1368 // Handle array index arithmetic. 1369 Type *IndexedTy = GTI.getIndexedType(); 1370 if (!IndexedTy->isSized()) { 1371 TrailZ = 0; 1372 break; 1373 } 1374 unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits(); 1375 uint64_t TypeSize = DL.getTypeAllocSize(IndexedTy); 1376 LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0); 1377 computeKnownBits(Index, LocalKnownZero, LocalKnownOne, DL, Depth + 1, 1378 Q); 1379 TrailZ = std::min(TrailZ, 1380 unsigned(countTrailingZeros(TypeSize) + 1381 LocalKnownZero.countTrailingOnes())); 1382 } 1383 } 1384 1385 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ); 1386 break; 1387 } 1388 case Instruction::PHI: { 1389 PHINode *P = cast<PHINode>(I); 1390 // Handle the case of a simple two-predecessor recurrence PHI. 1391 // There's a lot more that could theoretically be done here, but 1392 // this is sufficient to catch some interesting cases. 1393 if (P->getNumIncomingValues() == 2) { 1394 for (unsigned i = 0; i != 2; ++i) { 1395 Value *L = P->getIncomingValue(i); 1396 Value *R = P->getIncomingValue(!i); 1397 Operator *LU = dyn_cast<Operator>(L); 1398 if (!LU) 1399 continue; 1400 unsigned Opcode = LU->getOpcode(); 1401 // Check for operations that have the property that if 1402 // both their operands have low zero bits, the result 1403 // will have low zero bits. 1404 if (Opcode == Instruction::Add || 1405 Opcode == Instruction::Sub || 1406 Opcode == Instruction::And || 1407 Opcode == Instruction::Or || 1408 Opcode == Instruction::Mul) { 1409 Value *LL = LU->getOperand(0); 1410 Value *LR = LU->getOperand(1); 1411 // Find a recurrence. 
1412 if (LL == I) 1413 L = LR; 1414 else if (LR == I) 1415 L = LL; 1416 else 1417 break; 1418 // Ok, we have a PHI of the form L op= R. Check for low 1419 // zero bits. 1420 computeKnownBits(R, KnownZero2, KnownOne2, DL, Depth + 1, Q); 1421 1422 // We need to take the minimum number of known bits 1423 APInt KnownZero3(KnownZero), KnownOne3(KnownOne); 1424 computeKnownBits(L, KnownZero3, KnownOne3, DL, Depth + 1, Q); 1425 1426 KnownZero = APInt::getLowBitsSet(BitWidth, 1427 std::min(KnownZero2.countTrailingOnes(), 1428 KnownZero3.countTrailingOnes())); 1429 break; 1430 } 1431 } 1432 } 1433 1434 // Unreachable blocks may have zero-operand PHI nodes. 1435 if (P->getNumIncomingValues() == 0) 1436 break; 1437 1438 // Otherwise take the unions of the known bit sets of the operands, 1439 // taking conservative care to avoid excessive recursion. 1440 if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) { 1441 // Skip if every incoming value references to ourself. 1442 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue())) 1443 break; 1444 1445 KnownZero = APInt::getAllOnesValue(BitWidth); 1446 KnownOne = APInt::getAllOnesValue(BitWidth); 1447 for (Value *IncValue : P->incoming_values()) { 1448 // Skip direct self references. 1449 if (IncValue == P) continue; 1450 1451 KnownZero2 = APInt(BitWidth, 0); 1452 KnownOne2 = APInt(BitWidth, 0); 1453 // Recurse, but cap the recursion to one level, because we don't 1454 // want to waste time spinning around in loops. 1455 computeKnownBits(IncValue, KnownZero2, KnownOne2, DL, 1456 MaxDepth - 1, Q); 1457 KnownZero &= KnownZero2; 1458 KnownOne &= KnownOne2; 1459 // If all bits have been ruled out, there's no need to check 1460 // more operands. 1461 if (!KnownZero && !KnownOne) 1462 break; 1463 } 1464 } 1465 break; 1466 } 1467 case Instruction::Call: 1468 case Instruction::Invoke: 1469 if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range)) 1470 computeKnownBitsFromRangeMetadata(*MD, KnownZero); 1471 // If a range metadata is attached to this IntrinsicInst, intersect the 1472 // explicit range specified by the metadata and the implicit range of 1473 // the intrinsic. 1474 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 1475 switch (II->getIntrinsicID()) { 1476 default: break; 1477 case Intrinsic::ctlz: 1478 case Intrinsic::cttz: { 1479 unsigned LowBits = Log2_32(BitWidth)+1; 1480 // If this call is undefined for 0, the result will be less than 2^n. 
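// (For i32, ctlz/cttz results lie in [0, 32], which needs at most 6 bits;
// when a zero input is undefined the result is at most 31 and fits in 5
// bits, so one more high bit can be marked known-zero.)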
1481 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) 1482 LowBits -= 1; 1483 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); 1484 break; 1485 } 1486 case Intrinsic::ctpop: { 1487 unsigned LowBits = Log2_32(BitWidth)+1; 1488 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits); 1489 break; 1490 } 1491 case Intrinsic::x86_sse42_crc32_64_64: 1492 KnownZero |= APInt::getHighBitsSet(64, 32); 1493 break; 1494 } 1495 } 1496 break; 1497 case Instruction::ExtractValue: 1498 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) { 1499 ExtractValueInst *EVI = cast<ExtractValueInst>(I); 1500 if (EVI->getNumIndices() != 1) break; 1501 if (EVI->getIndices()[0] == 0) { 1502 switch (II->getIntrinsicID()) { 1503 default: break; 1504 case Intrinsic::uadd_with_overflow: 1505 case Intrinsic::sadd_with_overflow: 1506 computeKnownBitsAddSub(true, II->getArgOperand(0), 1507 II->getArgOperand(1), false, KnownZero, 1508 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q); 1509 break; 1510 case Intrinsic::usub_with_overflow: 1511 case Intrinsic::ssub_with_overflow: 1512 computeKnownBitsAddSub(false, II->getArgOperand(0), 1513 II->getArgOperand(1), false, KnownZero, 1514 KnownOne, KnownZero2, KnownOne2, DL, Depth, Q); 1515 break; 1516 case Intrinsic::umul_with_overflow: 1517 case Intrinsic::smul_with_overflow: 1518 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false, 1519 KnownZero, KnownOne, KnownZero2, KnownOne2, DL, 1520 Depth, Q); 1521 break; 1522 } 1523 } 1524 } 1525 } 1526 1527 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1528 } 1529 1530 /// Determine whether the sign bit is known to be zero or one. 1531 /// Convenience wrapper around computeKnownBits. 1532 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, 1533 const DataLayout &DL, unsigned Depth, const Query &Q) { 1534 unsigned BitWidth = getBitWidth(V->getType(), DL); 1535 if (!BitWidth) { 1536 KnownZero = false; 1537 KnownOne = false; 1538 return; 1539 } 1540 APInt ZeroBits(BitWidth, 0); 1541 APInt OneBits(BitWidth, 0); 1542 computeKnownBits(V, ZeroBits, OneBits, DL, Depth, Q); 1543 KnownOne = OneBits[BitWidth - 1]; 1544 KnownZero = ZeroBits[BitWidth - 1]; 1545 } 1546 1547 /// Return true if the given value is known to have exactly one 1548 /// bit set when defined. For vectors return true if every element is known to 1549 /// be a power of two when defined. Supports values with integer or pointer 1550 /// types and vectors of integers. 1551 bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth, 1552 const Query &Q, const DataLayout &DL) { 1553 if (Constant *C = dyn_cast<Constant>(V)) { 1554 if (C->isNullValue()) 1555 return OrZero; 1556 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) 1557 return CI->getValue().isPowerOf2(); 1558 // TODO: Handle vector constants. 1559 } 1560 1561 // 1 << X is clearly a power of two if the one is not shifted off the end. If 1562 // it is shifted off the end then the result is undefined. 1563 if (match(V, m_Shl(m_One(), m_Value()))) 1564 return true; 1565 1566 // (signbit) >>l X is clearly a power of two if the one is not shifted off the 1567 // bottom. If it is shifted off the bottom then the result is undefined. 1568 if (match(V, m_LShr(m_SignBit(), m_Value()))) 1569 return true; 1570 1571 // The remaining tests are all recursive, so bail out if we hit the limit. 
1572 if (Depth++ == MaxDepth) 1573 return false; 1574 1575 Value *X = nullptr, *Y = nullptr; 1576 // A shift of a power of two is a power of two or zero. 1577 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 1578 match(V, m_Shr(m_Value(X), m_Value())))) 1579 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL); 1580 1581 if (ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 1582 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q, DL); 1583 1584 if (SelectInst *SI = dyn_cast<SelectInst>(V)) 1585 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q, DL) && 1586 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q, DL); 1587 1588 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 1589 // A power of two and'd with anything is a power of two or zero. 1590 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL) || 1591 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q, DL)) 1592 return true; 1593 // X & (-X) is always a power of two or zero. 1594 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 1595 return true; 1596 return false; 1597 } 1598 1599 // Adding a power-of-two or zero to the same power-of-two or zero yields 1600 // either the original power-of-two, a larger power-of-two or zero. 1601 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1602 OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 1603 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) { 1604 if (match(X, m_And(m_Specific(Y), m_Value())) || 1605 match(X, m_And(m_Value(), m_Specific(Y)))) 1606 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q, DL)) 1607 return true; 1608 if (match(Y, m_And(m_Specific(X), m_Value())) || 1609 match(Y, m_And(m_Value(), m_Specific(X)))) 1610 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q, DL)) 1611 return true; 1612 1613 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 1614 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0); 1615 computeKnownBits(X, LHSZeroBits, LHSOneBits, DL, Depth, Q); 1616 1617 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0); 1618 computeKnownBits(Y, RHSZeroBits, RHSOneBits, DL, Depth, Q); 1619 // If i8 V is a power of two or zero: 1620 // ZeroBits: 1 1 1 0 1 1 1 1 1621 // ~ZeroBits: 0 0 0 1 0 0 0 0 1622 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2()) 1623 // If OrZero isn't set, we cannot give back a zero result. 1624 // Make sure either the LHS or RHS has a bit set. 1625 if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue()) 1626 return true; 1627 } 1628 } 1629 1630 // An exact divide or right shift can only shift off zero bits, so the result 1631 // is a power of two only if the first operand is a power of two and not 1632 // copying a sign bit (sdiv int_min, 2). 1633 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 1634 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 1635 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 1636 Depth, Q, DL); 1637 } 1638 1639 return false; 1640 } 1641 1642 /// \brief Test whether a GEP's result is known to be non-null. 1643 /// 1644 /// Uses properties inherent in a GEP to try to determine whether it is known 1645 /// to be non-null. 1646 /// 1647 /// Currently this routine does not support vector GEPs. 1648 static bool isGEPKnownNonNull(GEPOperator *GEP, const DataLayout &DL, 1649 unsigned Depth, const Query &Q) { 1650 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0) 1651 return false; 1652 1653 // FIXME: Support vector-GEPs. 
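// (A vector GEP produces a vector of pointers, e.g. <4 x i8*>, which the
// scalar walk below does not model; the assert that follows enforces the
// plain-pointer restriction.)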
1654   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1655
1656   // If the base pointer is non-null, we cannot walk to a null address with an
1657   // inbounds GEP in address space zero.
1658   if (isKnownNonZero(GEP->getPointerOperand(), DL, Depth, Q))
1659     return true;
1660
1661   // Walk the GEP operands and see if any operand introduces a non-zero offset.
1662   // If so, then the GEP cannot produce a null pointer, as doing so would
1663   // inherently violate the inbounds contract within address space zero.
1664   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1665        GTI != GTE; ++GTI) {
1666     // Struct types are easy -- they must always be indexed by a constant.
1667     if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1668       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1669       unsigned ElementIdx = OpC->getZExtValue();
1670       const StructLayout *SL = DL.getStructLayout(STy);
1671       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1672       if (ElementOffset > 0)
1673         return true;
1674       continue;
1675     }
1676
1677     // If we have a zero-sized type, the index doesn't matter. Keep looping.
1678     if (DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1679       continue;
1680
1681     // Fast path the constant operand case both for efficiency and so we don't
1682     // increment Depth when just zipping down an all-constant GEP.
1683     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1684       if (!OpC->isZero())
1685         return true;
1686       continue;
1687     }
1688
1689     // We post-increment Depth here because while isKnownNonZero increments it
1690     // as well, when we pop back up that increment won't persist. We don't want
1691     // to recurse 10k times just because we have 10k GEP operands. We don't
1692     // bail completely out because we want to handle constant GEPs regardless
1693     // of depth.
1694     if (Depth++ >= MaxDepth)
1695       continue;
1696
1697     if (isKnownNonZero(GTI.getOperand(), DL, Depth, Q))
1698       return true;
1699   }
1700
1701   return false;
1702 }
1703
1704 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1705 /// ensure that the value it's attached to is never equal to 'Value'?
1706 /// 'Value' must have the same bit width as the values described by the ranges.
1707 static bool rangeMetadataExcludesValue(MDNode* Ranges,
1708                                        const APInt& Value) {
1709   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1710   assert(NumRanges >= 1);
1711   for (unsigned i = 0; i < NumRanges; ++i) {
1712     ConstantInt *Lower =
1713         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1714     ConstantInt *Upper =
1715         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1716     ConstantRange Range(Lower->getValue(), Upper->getValue());
1717     if (Range.contains(Value))
1718       return false;
1719   }
1720   return true;
1721 }
1722
1723 /// Return true if the given value is known to be non-zero when defined.
1724 /// For vectors return true if every element is known to be non-zero when
1725 /// defined. Supports values with integer or pointer type and vectors of
1726 /// integers.
1727 bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
1728                     const Query &Q) {
1729   if (Constant *C = dyn_cast<Constant>(V)) {
1730     if (C->isNullValue())
1731       return false;
1732     if (isa<ConstantInt>(C))
1733       // Must be non-zero due to null test above.
1734 return true; 1735 // TODO: Handle vectors 1736 return false; 1737 } 1738 1739 if (Instruction* I = dyn_cast<Instruction>(V)) { 1740 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) { 1741 // If the possible ranges don't contain zero, then the value is 1742 // definitely non-zero. 1743 if (IntegerType* Ty = dyn_cast<IntegerType>(V->getType())) { 1744 const APInt ZeroValue(Ty->getBitWidth(), 0); 1745 if (rangeMetadataExcludesValue(Ranges, ZeroValue)) 1746 return true; 1747 } 1748 } 1749 } 1750 1751 // The remaining tests are all recursive, so bail out if we hit the limit. 1752 if (Depth++ >= MaxDepth) 1753 return false; 1754 1755 // Check for pointer simplifications. 1756 if (V->getType()->isPointerTy()) { 1757 if (isKnownNonNull(V)) 1758 return true; 1759 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 1760 if (isGEPKnownNonNull(GEP, DL, Depth, Q)) 1761 return true; 1762 } 1763 1764 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), DL); 1765 1766 // X | Y != 0 if X != 0 or Y != 0. 1767 Value *X = nullptr, *Y = nullptr; 1768 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 1769 return isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q); 1770 1771 // ext X != 0 if X != 0. 1772 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 1773 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), DL, Depth, Q); 1774 1775 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 1776 // if the lowest bit is shifted off the end. 1777 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) { 1778 // shl nuw can't remove any non-zero bits. 1779 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 1780 if (BO->hasNoUnsignedWrap()) 1781 return isKnownNonZero(X, DL, Depth, Q); 1782 1783 APInt KnownZero(BitWidth, 0); 1784 APInt KnownOne(BitWidth, 0); 1785 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q); 1786 if (KnownOne[0]) 1787 return true; 1788 } 1789 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 1790 // defined if the sign bit is shifted off the end. 1791 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 1792 // shr exact can only shift out zero bits. 1793 PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 1794 if (BO->isExact()) 1795 return isKnownNonZero(X, DL, Depth, Q); 1796 1797 bool XKnownNonNegative, XKnownNegative; 1798 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q); 1799 if (XKnownNegative) 1800 return true; 1801 } 1802 // div exact can only produce a zero if the dividend is zero. 1803 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 1804 return isKnownNonZero(X, DL, Depth, Q); 1805 } 1806 // X + Y. 1807 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1808 bool XKnownNonNegative, XKnownNegative; 1809 bool YKnownNonNegative, YKnownNegative; 1810 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q); 1811 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, DL, Depth, Q); 1812 1813 // If X and Y are both non-negative (as signed values) then their sum is not 1814 // zero unless both X and Y are zero. 1815 if (XKnownNonNegative && YKnownNonNegative) 1816 if (isKnownNonZero(X, DL, Depth, Q) || isKnownNonZero(Y, DL, Depth, Q)) 1817 return true; 1818 1819 // If X and Y are both negative (as signed values) then their sum is not 1820 // zero unless both X and Y equal INT_MIN. 
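// For example, with i8 operands both known negative the operands lie in
// [-128, -1], so the wrapped sum lies in [-256, -2]; it can only be
// congruent to zero modulo 2^8 when both operands are -128 (INT_MIN).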
1821 if (BitWidth && XKnownNegative && YKnownNegative) { 1822 APInt KnownZero(BitWidth, 0); 1823 APInt KnownOne(BitWidth, 0); 1824 APInt Mask = APInt::getSignedMaxValue(BitWidth); 1825 // The sign bit of X is set. If some other bit is set then X is not equal 1826 // to INT_MIN. 1827 computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q); 1828 if ((KnownOne & Mask) != 0) 1829 return true; 1830 // The sign bit of Y is set. If some other bit is set then Y is not equal 1831 // to INT_MIN. 1832 computeKnownBits(Y, KnownZero, KnownOne, DL, Depth, Q); 1833 if ((KnownOne & Mask) != 0) 1834 return true; 1835 } 1836 1837 // The sum of a non-negative number and a power of two is not zero. 1838 if (XKnownNonNegative && 1839 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q, DL)) 1840 return true; 1841 if (YKnownNonNegative && 1842 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q, DL)) 1843 return true; 1844 } 1845 // X * Y. 1846 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 1847 OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 1848 // If X and Y are non-zero then so is X * Y as long as the multiplication 1849 // does not overflow. 1850 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) && 1851 isKnownNonZero(X, DL, Depth, Q) && isKnownNonZero(Y, DL, Depth, Q)) 1852 return true; 1853 } 1854 // (C ? X : Y) != 0 if X != 0 and Y != 0. 1855 else if (SelectInst *SI = dyn_cast<SelectInst>(V)) { 1856 if (isKnownNonZero(SI->getTrueValue(), DL, Depth, Q) && 1857 isKnownNonZero(SI->getFalseValue(), DL, Depth, Q)) 1858 return true; 1859 } 1860 1861 if (!BitWidth) return false; 1862 APInt KnownZero(BitWidth, 0); 1863 APInt KnownOne(BitWidth, 0); 1864 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q); 1865 return KnownOne != 0; 1866 } 1867 1868 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 1869 /// simplify operations downstream. Mask is known to be zero for bits that V 1870 /// cannot have. 1871 /// 1872 /// This function is defined on values with integer type, values with pointer 1873 /// type, and vectors of integers. In the case 1874 /// where V is a vector, the mask, known zero, and known one values are the 1875 /// same width as the vector element, and the bit is set only if it is true 1876 /// for all of the elements in the vector. 1877 bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL, 1878 unsigned Depth, const Query &Q) { 1879 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0); 1880 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q); 1881 return (KnownZero & Mask) == Mask; 1882 } 1883 1884 1885 1886 /// Return the number of times the sign bit of the register is replicated into 1887 /// the other bits. We know that at least 1 bit is always equal to the sign bit 1888 /// (itself), but other cases can give us information. For example, immediately 1889 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each 1890 /// other, so we return 3. 1891 /// 1892 /// 'Op' must have a scalar integer type. 1893 /// 1894 unsigned ComputeNumSignBits(Value *V, const DataLayout &DL, unsigned Depth, 1895 const Query &Q) { 1896 unsigned TyBits = DL.getTypeSizeInBits(V->getType()->getScalarType()); 1897 unsigned Tmp, Tmp2; 1898 unsigned FirstAnswer = 1; 1899 1900 // Note that ConstantInt is handled by the general computeKnownBits case 1901 // below. 1902 1903 if (Depth == 6) 1904 return 1; // Limit search depth. 
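// As an illustrative example of the per-opcode rules below: for
// "sext i16 %x to i32" the SExt case reports at least 17 sign bits
// (16 newly created copies plus at least one already present in %x).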
1905 1906 Operator *U = dyn_cast<Operator>(V); 1907 switch (Operator::getOpcode(V)) { 1908 default: break; 1909 case Instruction::SExt: 1910 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); 1911 return ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q) + Tmp; 1912 1913 case Instruction::SDiv: { 1914 const APInt *Denominator; 1915 // sdiv X, C -> adds log(C) sign bits. 1916 if (match(U->getOperand(1), m_APInt(Denominator))) { 1917 1918 // Ignore non-positive denominator. 1919 if (!Denominator->isStrictlyPositive()) 1920 break; 1921 1922 // Calculate the incoming numerator bits. 1923 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); 1924 1925 // Add floor(log(C)) bits to the numerator bits. 1926 return std::min(TyBits, NumBits + Denominator->logBase2()); 1927 } 1928 break; 1929 } 1930 1931 case Instruction::SRem: { 1932 const APInt *Denominator; 1933 // srem X, C -> we know that the result is within [-C+1,C) when C is a 1934 // positive constant. This let us put a lower bound on the number of sign 1935 // bits. 1936 if (match(U->getOperand(1), m_APInt(Denominator))) { 1937 1938 // Ignore non-positive denominator. 1939 if (!Denominator->isStrictlyPositive()) 1940 break; 1941 1942 // Calculate the incoming numerator bits. SRem by a positive constant 1943 // can't lower the number of sign bits. 1944 unsigned NumrBits = 1945 ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); 1946 1947 // Calculate the leading sign bit constraints by examining the 1948 // denominator. Given that the denominator is positive, there are two 1949 // cases: 1950 // 1951 // 1. the numerator is positive. The result range is [0,C) and [0,C) u< 1952 // (1 << ceilLogBase2(C)). 1953 // 1954 // 2. the numerator is negative. Then the result range is (-C,0] and 1955 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). 1956 // 1957 // Thus a lower bound on the number of sign bits is `TyBits - 1958 // ceilLogBase2(C)`. 1959 1960 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); 1961 return std::max(NumrBits, ResBits); 1962 } 1963 break; 1964 } 1965 1966 case Instruction::AShr: { 1967 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); 1968 // ashr X, C -> adds C sign bits. Vectors too. 1969 const APInt *ShAmt; 1970 if (match(U->getOperand(1), m_APInt(ShAmt))) { 1971 Tmp += ShAmt->getZExtValue(); 1972 if (Tmp > TyBits) Tmp = TyBits; 1973 } 1974 return Tmp; 1975 } 1976 case Instruction::Shl: { 1977 const APInt *ShAmt; 1978 if (match(U->getOperand(1), m_APInt(ShAmt))) { 1979 // shl destroys sign bits. 1980 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); 1981 Tmp2 = ShAmt->getZExtValue(); 1982 if (Tmp2 >= TyBits || // Bad shift. 1983 Tmp2 >= Tmp) break; // Shifted all sign bits out. 1984 return Tmp - Tmp2; 1985 } 1986 break; 1987 } 1988 case Instruction::And: 1989 case Instruction::Or: 1990 case Instruction::Xor: // NOT is handled here. 1991 // Logical binary ops preserve the number of sign bits at the worst. 1992 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); 1993 if (Tmp != 1) { 1994 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); 1995 FirstAnswer = std::min(Tmp, Tmp2); 1996 // We computed what we know about the sign bits as our first 1997 // answer. Now proceed to the generic code that uses 1998 // computeKnownBits, and pick whichever answer is better. 
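// (e.g. for "and i32 %x, 255" the rule above can only promise one sign
// bit, but computeKnownBits proves the top 24 bits are zero, so the
// generic code at the bottom answers 24 instead.)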
1999 } 2000 break; 2001 2002 case Instruction::Select: 2003 Tmp = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); 2004 if (Tmp == 1) return 1; // Early out. 2005 Tmp2 = ComputeNumSignBits(U->getOperand(2), DL, Depth + 1, Q); 2006 return std::min(Tmp, Tmp2); 2007 2008 case Instruction::Add: 2009 // Add can have at most one carry bit. Thus we know that the output 2010 // is, at worst, one more bit than the inputs. 2011 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); 2012 if (Tmp == 1) return 1; // Early out. 2013 2014 // Special case decrementing a value (ADD X, -1): 2015 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 2016 if (CRHS->isAllOnesValue()) { 2017 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 2018 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, DL, Depth + 1, 2019 Q); 2020 2021 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2022 // sign bits set. 2023 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue()) 2024 return TyBits; 2025 2026 // If we are subtracting one from a positive number, there is no carry 2027 // out of the result. 2028 if (KnownZero.isNegative()) 2029 return Tmp; 2030 } 2031 2032 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); 2033 if (Tmp2 == 1) return 1; 2034 return std::min(Tmp, Tmp2)-1; 2035 2036 case Instruction::Sub: 2037 Tmp2 = ComputeNumSignBits(U->getOperand(1), DL, Depth + 1, Q); 2038 if (Tmp2 == 1) return 1; 2039 2040 // Handle NEG. 2041 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2042 if (CLHS->isNullValue()) { 2043 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 2044 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, DL, Depth + 1, 2045 Q); 2046 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2047 // sign bits set. 2048 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue()) 2049 return TyBits; 2050 2051 // If the input is known to be positive (the sign bit is known clear), 2052 // the output of the NEG has the same number of sign bits as the input. 2053 if (KnownZero.isNegative()) 2054 return Tmp2; 2055 2056 // Otherwise, we treat this like a SUB. 2057 } 2058 2059 // Sub can have at most one carry bit. Thus we know that the output 2060 // is, at worst, one more bit than the inputs. 2061 Tmp = ComputeNumSignBits(U->getOperand(0), DL, Depth + 1, Q); 2062 if (Tmp == 1) return 1; // Early out. 2063 return std::min(Tmp, Tmp2)-1; 2064 2065 case Instruction::PHI: { 2066 PHINode *PN = cast<PHINode>(U); 2067 unsigned NumIncomingValues = PN->getNumIncomingValues(); 2068 // Don't analyze large in-degree PHIs. 2069 if (NumIncomingValues > 4) break; 2070 // Unreachable blocks may have zero-operand PHI nodes. 2071 if (NumIncomingValues == 0) break; 2072 2073 // Take the minimum of all incoming values. This can't infinitely loop 2074 // because of our depth threshold. 2075 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), DL, Depth + 1, Q); 2076 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { 2077 if (Tmp == 1) return Tmp; 2078 Tmp = std::min( 2079 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), DL, Depth + 1, Q)); 2080 } 2081 return Tmp; 2082 } 2083 2084 case Instruction::Trunc: 2085 // FIXME: it's tricky to do anything useful for this, but it is an important 2086 // case for targets like X86. 2087 break; 2088 } 2089 2090 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2091 // use this information. 
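// For instance, if the top 20 bits of an i32 value are known to all be zero
// (or all be one), then at least 20 bits match the sign bit, and the final
// answer is the larger of that and FirstAnswer from the opcode cases above.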
2092 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 2093 APInt Mask; 2094 computeKnownBits(V, KnownZero, KnownOne, DL, Depth, Q); 2095 2096 if (KnownZero.isNegative()) { // sign bit is 0 2097 Mask = KnownZero; 2098 } else if (KnownOne.isNegative()) { // sign bit is 1; 2099 Mask = KnownOne; 2100 } else { 2101 // Nothing known. 2102 return FirstAnswer; 2103 } 2104 2105 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine 2106 // the number of identical bits in the top of the input value. 2107 Mask = ~Mask; 2108 Mask <<= Mask.getBitWidth()-TyBits; 2109 // Return # leading zeros. We use 'min' here in case Val was zero before 2110 // shifting. We don't want to return '64' as for an i32 "0". 2111 return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros())); 2112 } 2113 2114 /// This function computes the integer multiple of Base that equals V. 2115 /// If successful, it returns true and returns the multiple in 2116 /// Multiple. If unsuccessful, it returns false. It looks 2117 /// through SExt instructions only if LookThroughSExt is true. 2118 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 2119 bool LookThroughSExt, unsigned Depth) { 2120 const unsigned MaxDepth = 6; 2121 2122 assert(V && "No Value?"); 2123 assert(Depth <= MaxDepth && "Limit Search Depth"); 2124 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); 2125 2126 Type *T = V->getType(); 2127 2128 ConstantInt *CI = dyn_cast<ConstantInt>(V); 2129 2130 if (Base == 0) 2131 return false; 2132 2133 if (Base == 1) { 2134 Multiple = V; 2135 return true; 2136 } 2137 2138 ConstantExpr *CO = dyn_cast<ConstantExpr>(V); 2139 Constant *BaseVal = ConstantInt::get(T, Base); 2140 if (CO && CO == BaseVal) { 2141 // Multiple is 1. 2142 Multiple = ConstantInt::get(T, 1); 2143 return true; 2144 } 2145 2146 if (CI && CI->getZExtValue() % Base == 0) { 2147 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 2148 return true; 2149 } 2150 2151 if (Depth == MaxDepth) return false; // Limit search depth. 
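// Illustrative example: for V = "mul i32 %n, 4" (or "shl i32 %n, 2", which
// the Shl case below rewrites as a multiply by 4) and Base == 4, the
// recursion finds that the constant operand is 1 * Base, so Multiple is set
// to %n and true is returned; with Base == 8 neither operand is a known
// multiple and the function returns false.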
2152 2153 Operator *I = dyn_cast<Operator>(V); 2154 if (!I) return false; 2155 2156 switch (I->getOpcode()) { 2157 default: break; 2158 case Instruction::SExt: 2159 if (!LookThroughSExt) return false; 2160 // otherwise fall through to ZExt 2161 case Instruction::ZExt: 2162 return ComputeMultiple(I->getOperand(0), Base, Multiple, 2163 LookThroughSExt, Depth+1); 2164 case Instruction::Shl: 2165 case Instruction::Mul: { 2166 Value *Op0 = I->getOperand(0); 2167 Value *Op1 = I->getOperand(1); 2168 2169 if (I->getOpcode() == Instruction::Shl) { 2170 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 2171 if (!Op1CI) return false; 2172 // Turn Op0 << Op1 into Op0 * 2^Op1 2173 APInt Op1Int = Op1CI->getValue(); 2174 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 2175 APInt API(Op1Int.getBitWidth(), 0); 2176 API.setBit(BitToSet); 2177 Op1 = ConstantInt::get(V->getContext(), API); 2178 } 2179 2180 Value *Mul0 = nullptr; 2181 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 2182 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 2183 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 2184 if (Op1C->getType()->getPrimitiveSizeInBits() < 2185 MulC->getType()->getPrimitiveSizeInBits()) 2186 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 2187 if (Op1C->getType()->getPrimitiveSizeInBits() > 2188 MulC->getType()->getPrimitiveSizeInBits()) 2189 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 2190 2191 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 2192 Multiple = ConstantExpr::getMul(MulC, Op1C); 2193 return true; 2194 } 2195 2196 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 2197 if (Mul0CI->getValue() == 1) { 2198 // V == Base * Op1, so return Op1 2199 Multiple = Op1; 2200 return true; 2201 } 2202 } 2203 2204 Value *Mul1 = nullptr; 2205 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 2206 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 2207 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 2208 if (Op0C->getType()->getPrimitiveSizeInBits() < 2209 MulC->getType()->getPrimitiveSizeInBits()) 2210 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 2211 if (Op0C->getType()->getPrimitiveSizeInBits() > 2212 MulC->getType()->getPrimitiveSizeInBits()) 2213 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 2214 2215 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 2216 Multiple = ConstantExpr::getMul(MulC, Op0C); 2217 return true; 2218 } 2219 2220 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) 2221 if (Mul1CI->getValue() == 1) { 2222 // V == Base * Op0, so return Op0 2223 Multiple = Op0; 2224 return true; 2225 } 2226 } 2227 } 2228 } 2229 2230 // We could not determine if V is a multiple of Base. 2231 return false; 2232 } 2233 2234 /// Return true if we can prove that the specified FP value is never equal to 2235 /// -0.0. 2236 /// 2237 /// NOTE: this function will need to be revisited when we support non-default 2238 /// rounding modes! 2239 /// 2240 bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) { 2241 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) 2242 return !CFP->getValueAPF().isNegZero(); 2243 2244 // FIXME: Magic number! At the least, this should be given a name because it's 2245 // used similarly in CannotBeOrderedLessThanZero(). A better fix may be to 2246 // expose it as a parameter, so it can be used for testing / experimenting. 2247 if (Depth == 6) 2248 return false; // Limit search depth. 
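// For example, "fadd float %x, 0.000000e+00" is provably not -0.0: under the
// default rounding mode even -0.0 + +0.0 yields +0.0, which is what the FAdd
// special case below relies on.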
2249 2250 const Operator *I = dyn_cast<Operator>(V); 2251 if (!I) return false; 2252 2253 // Check if the nsz fast-math flag is set 2254 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I)) 2255 if (FPO->hasNoSignedZeros()) 2256 return true; 2257 2258 // (add x, 0.0) is guaranteed to return +0.0, not -0.0. 2259 if (I->getOpcode() == Instruction::FAdd) 2260 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1))) 2261 if (CFP->isNullValue()) 2262 return true; 2263 2264 // sitofp and uitofp turn into +0.0 for zero. 2265 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I)) 2266 return true; 2267 2268 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2269 // sqrt(-0.0) = -0.0, no other negative results are possible. 2270 if (II->getIntrinsicID() == Intrinsic::sqrt) 2271 return CannotBeNegativeZero(II->getArgOperand(0), Depth+1); 2272 2273 if (const CallInst *CI = dyn_cast<CallInst>(I)) 2274 if (const Function *F = CI->getCalledFunction()) { 2275 if (F->isDeclaration()) { 2276 // abs(x) != -0.0 2277 if (F->getName() == "abs") return true; 2278 // fabs[lf](x) != -0.0 2279 if (F->getName() == "fabs") return true; 2280 if (F->getName() == "fabsf") return true; 2281 if (F->getName() == "fabsl") return true; 2282 if (F->getName() == "sqrt" || F->getName() == "sqrtf" || 2283 F->getName() == "sqrtl") 2284 return CannotBeNegativeZero(CI->getArgOperand(0), Depth+1); 2285 } 2286 } 2287 2288 return false; 2289 } 2290 2291 bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) { 2292 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) 2293 return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero(); 2294 2295 // FIXME: Magic number! At the least, this should be given a name because it's 2296 // used similarly in CannotBeNegativeZero(). A better fix may be to 2297 // expose it as a parameter, so it can be used for testing / experimenting. 2298 if (Depth == 6) 2299 return false; // Limit search depth. 2300 2301 const Operator *I = dyn_cast<Operator>(V); 2302 if (!I) return false; 2303 2304 switch (I->getOpcode()) { 2305 default: break; 2306 case Instruction::FMul: 2307 // x*x is always non-negative or a NaN. 2308 if (I->getOperand(0) == I->getOperand(1)) 2309 return true; 2310 // Fall through 2311 case Instruction::FAdd: 2312 case Instruction::FDiv: 2313 case Instruction::FRem: 2314 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) && 2315 CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1); 2316 case Instruction::FPExt: 2317 case Instruction::FPTrunc: 2318 // Widening/narrowing never change sign. 2319 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1); 2320 case Instruction::Call: 2321 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 2322 switch (II->getIntrinsicID()) { 2323 default: break; 2324 case Intrinsic::exp: 2325 case Intrinsic::exp2: 2326 case Intrinsic::fabs: 2327 case Intrinsic::sqrt: 2328 return true; 2329 case Intrinsic::powi: 2330 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) { 2331 // powi(x,n) is non-negative if n is even. 2332 if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0) 2333 return true; 2334 } 2335 return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1); 2336 case Intrinsic::fma: 2337 case Intrinsic::fmuladd: 2338 // x*x+y is non-negative if y is non-negative. 
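// (More precisely, the result is non-negative or NaN, which is all that
// "cannot be ordered less than zero" requires; e.g. fma(%x, %x, 1.0) never
// compares less than 0.0.)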
2339 return I->getOperand(0) == I->getOperand(1) && 2340 CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1); 2341 } 2342 break; 2343 } 2344 return false; 2345 } 2346 2347 /// If the specified value can be set by repeating the same byte in memory, 2348 /// return the i8 value that it is represented with. This is 2349 /// true for all i8 values obviously, but is also true for i32 0, i32 -1, 2350 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated 2351 /// byte store (e.g. i16 0x1234), return null. 2352 Value *llvm::isBytewiseValue(Value *V) { 2353 // All byte-wide stores are splatable, even of arbitrary variables. 2354 if (V->getType()->isIntegerTy(8)) return V; 2355 2356 // Handle 'null' ConstantArrayZero etc. 2357 if (Constant *C = dyn_cast<Constant>(V)) 2358 if (C->isNullValue()) 2359 return Constant::getNullValue(Type::getInt8Ty(V->getContext())); 2360 2361 // Constant float and double values can be handled as integer values if the 2362 // corresponding integer value is "byteable". An important case is 0.0. 2363 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 2364 if (CFP->getType()->isFloatTy()) 2365 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext())); 2366 if (CFP->getType()->isDoubleTy()) 2367 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext())); 2368 // Don't handle long double formats, which have strange constraints. 2369 } 2370 2371 // We can handle constant integers that are multiple of 8 bits. 2372 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 2373 if (CI->getBitWidth() % 8 == 0) { 2374 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); 2375 2376 if (!CI->getValue().isSplat(8)) 2377 return nullptr; 2378 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8)); 2379 } 2380 } 2381 2382 // A ConstantDataArray/Vector is splatable if all its members are equal and 2383 // also splatable. 2384 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) { 2385 Value *Elt = CA->getElementAsConstant(0); 2386 Value *Val = isBytewiseValue(Elt); 2387 if (!Val) 2388 return nullptr; 2389 2390 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I) 2391 if (CA->getElementAsConstant(I) != Elt) 2392 return nullptr; 2393 2394 return Val; 2395 } 2396 2397 // Conceptually, we could handle things like: 2398 // %a = zext i8 %X to i16 2399 // %b = shl i16 %a, 8 2400 // %c = or i16 %a, %b 2401 // but until there is an example that actually needs this, it doesn't seem 2402 // worth worrying about. 2403 return nullptr; 2404 } 2405 2406 2407 // This is the recursive version of BuildSubAggregate. It takes a few different 2408 // arguments. Idxs is the index within the nested struct From that we are 2409 // looking at now (which is of type IndexedType). IdxSkip is the number of 2410 // indices from Idxs that should be left out when inserting into the resulting 2411 // struct. To is the result struct built so far, new insertvalue instructions 2412 // build on that. 
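// As an illustrative walk-through: when extracting index path {1, 1} out of
// { a, { b, { c, d }, e } }, the wrapper below passes IdxSkip == 2; this
// recursion then appends 0 and 1 to Idxs in turn, looks each scalar up with
// FindInsertedValue, and emits insertvalue instructions that place them at
// indices {0} and {1} of the new { c, d } aggregate.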
2413 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2414                                 SmallVectorImpl<unsigned> &Idxs,
2415                                 unsigned IdxSkip,
2416                                 Instruction *InsertBefore) {
2417   llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
2418   if (STy) {
2419     // Save the original To argument so we can modify it
2420     Value *OrigTo = To;
2421     // General case, the type indexed by Idxs is a struct
2422     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2423       // Process each struct element recursively
2424       Idxs.push_back(i);
2425       Value *PrevTo = To;
2426       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2427                              InsertBefore);
2428       Idxs.pop_back();
2429       if (!To) {
2430         // Couldn't find any inserted value for this index? Cleanup
2431         while (PrevTo != OrigTo) {
2432           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2433           PrevTo = Del->getAggregateOperand();
2434           Del->eraseFromParent();
2435         }
2436         // Stop processing elements
2437         break;
2438       }
2439     }
2440     // If we successfully found a value for each of our subaggregates
2441     if (To)
2442       return To;
2443   }
2444   // Base case, the type indexed by Idxs is not a struct, or not all of
2445   // the struct's elements had a value that was inserted directly. In the latter
2446   // case, perhaps we can't determine each of the subelements individually, but
2447   // we might be able to find the complete struct somewhere.
2448
2449   // Find the value that is at that particular spot
2450   Value *V = FindInsertedValue(From, Idxs);
2451
2452   if (!V)
2453     return nullptr;
2454
2455   // Insert the value in the new (sub) aggregate
2456   return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2457                                        "tmp", InsertBefore);
2458 }
2459
2460 // This helper takes a nested struct and extracts a part of it (which is again a
2461 // struct) into a new value. For example, given the struct:
2462 // { a, { b, { c, d }, e } }
2463 // and the indices "1, 1" this returns
2464 // { c, d }.
2465 //
2466 // It does this by inserting an insertvalue for each element in the resulting
2467 // struct, as opposed to just inserting a single struct. This will only work if
2468 // each of the elements of the substruct is known (i.e., inserted into From by an
2469 // insertvalue instruction somewhere).
2470 //
2471 // All inserted insertvalue instructions are inserted before InsertBefore.
2472 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2473                                 Instruction *InsertBefore) {
2474   assert(InsertBefore && "Must have someplace to insert!");
2475   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2476                                                        idx_range);
2477   Value *To = UndefValue::get(IndexedType);
2478   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2479   unsigned IdxSkip = Idxs.size();
2480
2481   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2482 }
2483
2484 /// Given an aggregate and a sequence of indices, see if
2485 /// the scalar value indexed is already around as a register, for example if it
2486 /// were inserted directly into the aggregate.
2487 ///
2488 /// If InsertBefore is not null, this function will duplicate (modified)
2489 /// insertvalues when a part of a nested struct is extracted.
2490 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
2491                                Instruction *InsertBefore) {
2492   // Nothing to index? Just return V then (this is useful at the end of our
2493   // recursion).
2494 if (idx_range.empty()) 2495 return V; 2496 // We have indices, so V should have an indexable type. 2497 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && 2498 "Not looking at a struct or array?"); 2499 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && 2500 "Invalid indices for type?"); 2501 2502 if (Constant *C = dyn_cast<Constant>(V)) { 2503 C = C->getAggregateElement(idx_range[0]); 2504 if (!C) return nullptr; 2505 return FindInsertedValue(C, idx_range.slice(1), InsertBefore); 2506 } 2507 2508 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { 2509 // Loop the indices for the insertvalue instruction in parallel with the 2510 // requested indices 2511 const unsigned *req_idx = idx_range.begin(); 2512 for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); 2513 i != e; ++i, ++req_idx) { 2514 if (req_idx == idx_range.end()) { 2515 // We can't handle this without inserting insertvalues 2516 if (!InsertBefore) 2517 return nullptr; 2518 2519 // The requested index identifies a part of a nested aggregate. Handle 2520 // this specially. For example, 2521 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0 2522 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1 2523 // %C = extractvalue {i32, { i32, i32 } } %B, 1 2524 // This can be changed into 2525 // %A = insertvalue {i32, i32 } undef, i32 10, 0 2526 // %C = insertvalue {i32, i32 } %A, i32 11, 1 2527 // which allows the unused 0,0 element from the nested struct to be 2528 // removed. 2529 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx), 2530 InsertBefore); 2531 } 2532 2533 // This insert value inserts something else than what we are looking for. 2534 // See if the (aggregrate) value inserted into has the value we are 2535 // looking for, then. 2536 if (*req_idx != *i) 2537 return FindInsertedValue(I->getAggregateOperand(), idx_range, 2538 InsertBefore); 2539 } 2540 // If we end up here, the indices of the insertvalue match with those 2541 // requested (though possibly only partially). Now we recursively look at 2542 // the inserted value, passing any remaining indices. 2543 return FindInsertedValue(I->getInsertedValueOperand(), 2544 makeArrayRef(req_idx, idx_range.end()), 2545 InsertBefore); 2546 } 2547 2548 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { 2549 // If we're extracting a value from an aggregrate that was extracted from 2550 // something else, we can extract from that something else directly instead. 2551 // However, we will need to chain I's indices with the requested indices. 2552 2553 // Calculate the number of indices required 2554 unsigned size = I->getNumIndices() + idx_range.size(); 2555 // Allocate some space to put the new indices in 2556 SmallVector<unsigned, 5> Idxs; 2557 Idxs.reserve(size); 2558 // Add indices from the extract value instruction 2559 Idxs.append(I->idx_begin(), I->idx_end()); 2560 2561 // Add requested indices 2562 Idxs.append(idx_range.begin(), idx_range.end()); 2563 2564 assert(Idxs.size() == size 2565 && "Number of indices added not correct?"); 2566 2567 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); 2568 } 2569 // Otherwise, we don't know (such as, extracting from a function return value 2570 // or load instruction) 2571 return nullptr; 2572 } 2573 2574 /// Analyze the specified pointer to see if it can be expressed as a base 2575 /// pointer plus a constant offset. Return the base and offset to the caller. 
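/// For example (illustrative), for
///   %p = getelementptr inbounds i8, i8* %base, i64 12
/// this returns %base with Offset == 12; a GEP with a non-constant index
/// stops the walk, returning that GEP with whatever offset was accumulated
/// so far.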
2576 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, 2577 const DataLayout &DL) { 2578 unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType()); 2579 APInt ByteOffset(BitWidth, 0); 2580 while (1) { 2581 if (Ptr->getType()->isVectorTy()) 2582 break; 2583 2584 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) { 2585 APInt GEPOffset(BitWidth, 0); 2586 if (!GEP->accumulateConstantOffset(DL, GEPOffset)) 2587 break; 2588 2589 ByteOffset += GEPOffset; 2590 2591 Ptr = GEP->getPointerOperand(); 2592 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast || 2593 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) { 2594 Ptr = cast<Operator>(Ptr)->getOperand(0); 2595 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) { 2596 if (GA->mayBeOverridden()) 2597 break; 2598 Ptr = GA->getAliasee(); 2599 } else { 2600 break; 2601 } 2602 } 2603 Offset = ByteOffset.getSExtValue(); 2604 return Ptr; 2605 } 2606 2607 2608 /// This function computes the length of a null-terminated C string pointed to 2609 /// by V. If successful, it returns true and returns the string in Str. 2610 /// If unsuccessful, it returns false. 2611 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 2612 uint64_t Offset, bool TrimAtNul) { 2613 assert(V); 2614 2615 // Look through bitcast instructions and geps. 2616 V = V->stripPointerCasts(); 2617 2618 // If the value is a GEP instruction or constant expression, treat it as an 2619 // offset. 2620 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 2621 // Make sure the GEP has exactly three arguments. 2622 if (GEP->getNumOperands() != 3) 2623 return false; 2624 2625 // Make sure the index-ee is a pointer to array of i8. 2626 PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType()); 2627 ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType()); 2628 if (!AT || !AT->getElementType()->isIntegerTy(8)) 2629 return false; 2630 2631 // Check to make sure that the first operand of the GEP is an integer and 2632 // has value 0 so that we are sure we're indexing into the initializer. 2633 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); 2634 if (!FirstIdx || !FirstIdx->isZero()) 2635 return false; 2636 2637 // If the second index isn't a ConstantInt, then this is a variable index 2638 // into the array. If this occurs, we can't say anything meaningful about 2639 // the string. 2640 uint64_t StartIdx = 0; 2641 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) 2642 StartIdx = CI->getZExtValue(); 2643 else 2644 return false; 2645 return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset, 2646 TrimAtNul); 2647 } 2648 2649 // The GEP instruction, constant or instruction, must reference a global 2650 // variable that is a constant and is initialized. The referenced constant 2651 // initializer is the array that we'll use for optimization. 2652 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); 2653 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) 2654 return false; 2655 2656 // Handle the all-zeros case 2657 if (GV->getInitializer()->isNullValue()) { 2658 // This is a degenerate case. The initializer is constant zero so the 2659 // length of the string must be zero. 
2660 Str = ""; 2661 return true; 2662 } 2663 2664 // Must be a Constant Array 2665 const ConstantDataArray *Array = 2666 dyn_cast<ConstantDataArray>(GV->getInitializer()); 2667 if (!Array || !Array->isString()) 2668 return false; 2669 2670 // Get the number of elements in the array 2671 uint64_t NumElts = Array->getType()->getArrayNumElements(); 2672 2673 // Start out with the entire array in the StringRef. 2674 Str = Array->getAsString(); 2675 2676 if (Offset > NumElts) 2677 return false; 2678 2679 // Skip over 'offset' bytes. 2680 Str = Str.substr(Offset); 2681 2682 if (TrimAtNul) { 2683 // Trim off the \0 and anything after it. If the array is not nul 2684 // terminated, we just return the whole end of string. The client may know 2685 // some other way that the string is length-bound. 2686 Str = Str.substr(0, Str.find('\0')); 2687 } 2688 return true; 2689 } 2690 2691 // These next two are very similar to the above, but also look through PHI 2692 // nodes. 2693 // TODO: See if we can integrate these two together. 2694 2695 /// If we can compute the length of the string pointed to by 2696 /// the specified pointer, return 'len+1'. If we can't, return 0. 2697 static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) { 2698 // Look through noop bitcast instructions. 2699 V = V->stripPointerCasts(); 2700 2701 // If this is a PHI node, there are two cases: either we have already seen it 2702 // or we haven't. 2703 if (PHINode *PN = dyn_cast<PHINode>(V)) { 2704 if (!PHIs.insert(PN).second) 2705 return ~0ULL; // already in the set. 2706 2707 // If it was new, see if all the input strings are the same length. 2708 uint64_t LenSoFar = ~0ULL; 2709 for (Value *IncValue : PN->incoming_values()) { 2710 uint64_t Len = GetStringLengthH(IncValue, PHIs); 2711 if (Len == 0) return 0; // Unknown length -> unknown. 2712 2713 if (Len == ~0ULL) continue; 2714 2715 if (Len != LenSoFar && LenSoFar != ~0ULL) 2716 return 0; // Disagree -> unknown. 2717 LenSoFar = Len; 2718 } 2719 2720 // Success, all agree. 2721 return LenSoFar; 2722 } 2723 2724 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) 2725 if (SelectInst *SI = dyn_cast<SelectInst>(V)) { 2726 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs); 2727 if (Len1 == 0) return 0; 2728 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs); 2729 if (Len2 == 0) return 0; 2730 if (Len1 == ~0ULL) return Len2; 2731 if (Len2 == ~0ULL) return Len1; 2732 if (Len1 != Len2) return 0; 2733 return Len1; 2734 } 2735 2736 // Otherwise, see if we can read the string. 2737 StringRef StrData; 2738 if (!getConstantStringInfo(V, StrData)) 2739 return 0; 2740 2741 return StrData.size()+1; 2742 } 2743 2744 /// If we can compute the length of the string pointed to by 2745 /// the specified pointer, return 'len+1'. If we can't, return 0. 2746 uint64_t llvm::GetStringLength(Value *V) { 2747 if (!V->getType()->isPointerTy()) return 0; 2748 2749 SmallPtrSet<PHINode*, 32> PHIs; 2750 uint64_t Len = GetStringLengthH(V, PHIs); 2751 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return 2752 // an empty string as a length. 2753 return Len == ~0ULL ? 1 : Len; 2754 } 2755 2756 /// \brief \p PN defines a loop-variant pointer to an object. Check if the 2757 /// previous iteration of the loop was referring to the same object as \p PN. 2758 static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) { 2759 // Find the loop-defined value. 
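// (i.e. the incoming value that is defined inside this loop, as opposed to
// the value arriving from outside the loop on the first iteration.)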
2760 Loop *L = LI->getLoopFor(PN->getParent()); 2761 if (PN->getNumIncomingValues() != 2) 2762 return true; 2763 2764 // Find the value from previous iteration. 2765 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); 2766 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 2767 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); 2768 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 2769 return true; 2770 2771 // If a new pointer is loaded in the loop, the pointer references a different 2772 // object in every iteration. E.g.: 2773 // for (i) 2774 // int *p = a[i]; 2775 // ... 2776 if (auto *Load = dyn_cast<LoadInst>(PrevValue)) 2777 if (!L->isLoopInvariant(Load->getPointerOperand())) 2778 return false; 2779 return true; 2780 } 2781 2782 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL, 2783 unsigned MaxLookup) { 2784 if (!V->getType()->isPointerTy()) 2785 return V; 2786 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { 2787 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 2788 V = GEP->getPointerOperand(); 2789 } else if (Operator::getOpcode(V) == Instruction::BitCast || 2790 Operator::getOpcode(V) == Instruction::AddrSpaceCast) { 2791 V = cast<Operator>(V)->getOperand(0); 2792 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 2793 if (GA->mayBeOverridden()) 2794 return V; 2795 V = GA->getAliasee(); 2796 } else { 2797 // See if InstructionSimplify knows any relevant tricks. 2798 if (Instruction *I = dyn_cast<Instruction>(V)) 2799 // TODO: Acquire a DominatorTree and AssumptionCache and use them. 2800 if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) { 2801 V = Simplified; 2802 continue; 2803 } 2804 2805 return V; 2806 } 2807 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 2808 } 2809 return V; 2810 } 2811 2812 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects, 2813 const DataLayout &DL, LoopInfo *LI, 2814 unsigned MaxLookup) { 2815 SmallPtrSet<Value *, 4> Visited; 2816 SmallVector<Value *, 4> Worklist; 2817 Worklist.push_back(V); 2818 do { 2819 Value *P = Worklist.pop_back_val(); 2820 P = GetUnderlyingObject(P, DL, MaxLookup); 2821 2822 if (!Visited.insert(P).second) 2823 continue; 2824 2825 if (SelectInst *SI = dyn_cast<SelectInst>(P)) { 2826 Worklist.push_back(SI->getTrueValue()); 2827 Worklist.push_back(SI->getFalseValue()); 2828 continue; 2829 } 2830 2831 if (PHINode *PN = dyn_cast<PHINode>(P)) { 2832 // If this PHI changes the underlying object in every iteration of the 2833 // loop, don't look through it. Consider: 2834 // int **A; 2835 // for (i) { 2836 // Prev = Curr; // Prev = PHI (Prev_0, Curr) 2837 // Curr = A[i]; 2838 // *Prev, *Curr; 2839 // 2840 // Prev is tracking Curr one iteration behind so they refer to different 2841 // underlying objects. 2842 if (!LI || !LI->isLoopHeader(PN->getParent()) || 2843 isSameUnderlyingObjectInLoop(PN, LI)) 2844 for (Value *IncValue : PN->incoming_values()) 2845 Worklist.push_back(IncValue); 2846 continue; 2847 } 2848 2849 Objects.push_back(P); 2850 } while (!Worklist.empty()); 2851 } 2852 2853 /// Return true if the only users of this pointer are lifetime markers. 
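/// For example (illustrative), an alloca whose only uses are
/// llvm.lifetime.start / llvm.lifetime.end calls qualifies; a single load,
/// store, or other call using the pointer makes this return false.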
2854 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 2855 for (const User *U : V->users()) { 2856 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 2857 if (!II) return false; 2858 2859 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 2860 II->getIntrinsicID() != Intrinsic::lifetime_end) 2861 return false; 2862 } 2863 return true; 2864 } 2865 2866 static bool isDereferenceableFromAttribute(const Value *BV, APInt Offset, 2867 Type *Ty, const DataLayout &DL, 2868 const Instruction *CtxI, 2869 const DominatorTree *DT, 2870 const TargetLibraryInfo *TLI) { 2871 assert(Offset.isNonNegative() && "offset can't be negative"); 2872 assert(Ty->isSized() && "must be sized"); 2873 2874 APInt DerefBytes(Offset.getBitWidth(), 0); 2875 bool CheckForNonNull = false; 2876 if (const Argument *A = dyn_cast<Argument>(BV)) { 2877 DerefBytes = A->getDereferenceableBytes(); 2878 if (!DerefBytes.getBoolValue()) { 2879 DerefBytes = A->getDereferenceableOrNullBytes(); 2880 CheckForNonNull = true; 2881 } 2882 } else if (auto CS = ImmutableCallSite(BV)) { 2883 DerefBytes = CS.getDereferenceableBytes(0); 2884 if (!DerefBytes.getBoolValue()) { 2885 DerefBytes = CS.getDereferenceableOrNullBytes(0); 2886 CheckForNonNull = true; 2887 } 2888 } else if (const LoadInst *LI = dyn_cast<LoadInst>(BV)) { 2889 if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) { 2890 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0)); 2891 DerefBytes = CI->getLimitedValue(); 2892 } 2893 if (!DerefBytes.getBoolValue()) { 2894 if (MDNode *MD = 2895 LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) { 2896 ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0)); 2897 DerefBytes = CI->getLimitedValue(); 2898 } 2899 CheckForNonNull = true; 2900 } 2901 } 2902 2903 if (DerefBytes.getBoolValue()) 2904 if (DerefBytes.uge(Offset + DL.getTypeStoreSize(Ty))) 2905 if (!CheckForNonNull || isKnownNonNullAt(BV, CtxI, DT, TLI)) 2906 return true; 2907 2908 return false; 2909 } 2910 2911 static bool isDereferenceableFromAttribute(const Value *V, const DataLayout &DL, 2912 const Instruction *CtxI, 2913 const DominatorTree *DT, 2914 const TargetLibraryInfo *TLI) { 2915 Type *VTy = V->getType(); 2916 Type *Ty = VTy->getPointerElementType(); 2917 if (!Ty->isSized()) 2918 return false; 2919 2920 APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0); 2921 return isDereferenceableFromAttribute(V, Offset, Ty, DL, CtxI, DT, TLI); 2922 } 2923 2924 /// Return true if Value is always a dereferenceable pointer. 2925 /// 2926 /// Test if V is always a pointer to allocated and suitably aligned memory for 2927 /// a simple load or store. 2928 static bool isDereferenceablePointer(const Value *V, const DataLayout &DL, 2929 const Instruction *CtxI, 2930 const DominatorTree *DT, 2931 const TargetLibraryInfo *TLI, 2932 SmallPtrSetImpl<const Value *> &Visited) { 2933 // Note that it is not safe to speculate into a malloc'd region because 2934 // malloc may return null. 2935 2936 // These are obviously ok. 2937 if (isa<AllocaInst>(V)) return true; 2938 2939 // It's not always safe to follow a bitcast, for example: 2940 // bitcast i8* (alloca i8) to i32* 2941 // would result in a 4-byte load from a 1-byte alloca. However, 2942 // if we're casting from a pointer from a type of larger size 2943 // to a type of smaller size (or the same size), and the alignment 2944 // is at least as large as for the resulting pointer type, then 2945 // we can look through the bitcast. 
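// For example, "bitcast i32* %p to i8*" passes the checks below (4-byte
// store size and alignment are at least those of i8), while
// "bitcast i8* %q to i32*" is rejected by the size comparison.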
2946 if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) { 2947 Type *STy = BC->getSrcTy()->getPointerElementType(), 2948 *DTy = BC->getDestTy()->getPointerElementType(); 2949 if (STy->isSized() && DTy->isSized() && 2950 (DL.getTypeStoreSize(STy) >= DL.getTypeStoreSize(DTy)) && 2951 (DL.getABITypeAlignment(STy) >= DL.getABITypeAlignment(DTy))) 2952 return isDereferenceablePointer(BC->getOperand(0), DL, CtxI, 2953 DT, TLI, Visited); 2954 } 2955 2956 // Global variables which can't collapse to null are ok. 2957 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) 2958 return !GV->hasExternalWeakLinkage(); 2959 2960 // byval arguments are okay. 2961 if (const Argument *A = dyn_cast<Argument>(V)) 2962 if (A->hasByValAttr()) 2963 return true; 2964 2965 if (isDereferenceableFromAttribute(V, DL, CtxI, DT, TLI)) 2966 return true; 2967 2968 // For GEPs, determine if the indexing lands within the allocated object. 2969 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 2970 // Conservatively require that the base pointer be fully dereferenceable. 2971 if (!Visited.insert(GEP->getOperand(0)).second) 2972 return false; 2973 if (!isDereferenceablePointer(GEP->getOperand(0), DL, CtxI, 2974 DT, TLI, Visited)) 2975 return false; 2976 // Check the indices. 2977 gep_type_iterator GTI = gep_type_begin(GEP); 2978 for (User::const_op_iterator I = GEP->op_begin()+1, 2979 E = GEP->op_end(); I != E; ++I) { 2980 Value *Index = *I; 2981 Type *Ty = *GTI++; 2982 // Struct indices can't be out of bounds. 2983 if (isa<StructType>(Ty)) 2984 continue; 2985 ConstantInt *CI = dyn_cast<ConstantInt>(Index); 2986 if (!CI) 2987 return false; 2988 // Zero is always ok. 2989 if (CI->isZero()) 2990 continue; 2991 // Check to see that it's within the bounds of an array. 2992 ArrayType *ATy = dyn_cast<ArrayType>(Ty); 2993 if (!ATy) 2994 return false; 2995 if (CI->getValue().getActiveBits() > 64) 2996 return false; 2997 if (CI->getZExtValue() >= ATy->getNumElements()) 2998 return false; 2999 } 3000 // Indices check out; this is dereferenceable. 3001 return true; 3002 } 3003 3004 // For gc.relocate, look through relocations 3005 if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V)) 3006 if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) { 3007 GCRelocateOperands RelocateInst(I); 3008 return isDereferenceablePointer(RelocateInst.getDerivedPtr(), DL, CtxI, 3009 DT, TLI, Visited); 3010 } 3011 3012 if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V)) 3013 return isDereferenceablePointer(ASC->getOperand(0), DL, CtxI, 3014 DT, TLI, Visited); 3015 3016 // If we don't know, assume the worst. 3017 return false; 3018 } 3019 3020 bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL, 3021 const Instruction *CtxI, 3022 const DominatorTree *DT, 3023 const TargetLibraryInfo *TLI) { 3024 // When dereferenceability information is provided by a dereferenceable 3025 // attribute, we know exactly how many bytes are dereferenceable. If we can 3026 // determine the exact offset to the attributed variable, we can use that 3027 // information here. 
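  // Illustrative example (hypothetical IR, not part of this function): for an
  // argument %p annotated dereferenceable(16), an i32 load through an inbounds
  // GEP of %p at constant byte offset 4 accesses bytes [4, 8), and 16 >= 4 + 4,
  // so the attribute alone proves the access dereferenceable.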
3028 Type *VTy = V->getType();
3029 Type *Ty = VTy->getPointerElementType();
3030 if (Ty->isSized()) {
3031 APInt Offset(DL.getTypeStoreSizeInBits(VTy), 0);
3032 const Value *BV = V->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3033 
3034 if (Offset.isNonNegative())
3035 if (isDereferenceableFromAttribute(BV, Offset, Ty, DL,
3036 CtxI, DT, TLI))
3037 return true;
3038 }
3039 
3040 SmallPtrSet<const Value *, 32> Visited;
3041 return ::isDereferenceablePointer(V, DL, CtxI, DT, TLI, Visited);
3042 }
3043 
3044 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3045 const Instruction *CtxI,
3046 const DominatorTree *DT,
3047 const TargetLibraryInfo *TLI) {
3048 const Operator *Inst = dyn_cast<Operator>(V);
3049 if (!Inst)
3050 return false;
3051 
3052 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3053 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3054 if (C->canTrap())
3055 return false;
3056 
3057 switch (Inst->getOpcode()) {
3058 default:
3059 return true;
3060 case Instruction::UDiv:
3061 case Instruction::URem: {
3062 // x / y and x % y are undefined if y == 0.
3063 const APInt *V;
3064 if (match(Inst->getOperand(1), m_APInt(V)))
3065 return *V != 0;
3066 return false;
3067 }
3068 case Instruction::SDiv:
3069 case Instruction::SRem: {
3070 // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
3071 const APInt *Numerator, *Denominator;
3072 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3073 return false;
3074 // We cannot hoist this division if the denominator is 0.
3075 if (*Denominator == 0)
3076 return false;
3077 // It's safe to hoist if the denominator is not 0 or -1.
3078 if (*Denominator != -1)
3079 return true;
3080 // At this point we know that the denominator is -1. It is safe to hoist as
3081 // long as we know that the numerator is not INT_MIN.
3082 if (match(Inst->getOperand(0), m_APInt(Numerator)))
3083 return !Numerator->isMinSignedValue();
3084 // The numerator *might* be MinSignedValue.
3085 return false;
3086 }
3087 case Instruction::Load: {
3088 const LoadInst *LI = cast<LoadInst>(Inst);
3089 if (!LI->isUnordered() ||
3090 // Speculative load may create a race that did not exist in the source.
3091 LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
3092 return false;
3093 const DataLayout &DL = LI->getModule()->getDataLayout();
3094 return isDereferenceablePointer(LI->getPointerOperand(), DL, CtxI, DT, TLI);
3095 }
3096 case Instruction::Call: {
3097 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
3098 switch (II->getIntrinsicID()) {
3099 // These synthetic intrinsics have no side-effects and just mark
3100 // information about their operands.
3101 // FIXME: There are other no-op synthetic instructions that potentially
3102 // should be considered at least *safe* to speculate...
3103 case Intrinsic::dbg_declare:
3104 case Intrinsic::dbg_value:
3105 return true;
3106 
3107 case Intrinsic::bswap:
3108 case Intrinsic::ctlz:
3109 case Intrinsic::ctpop:
3110 case Intrinsic::cttz:
3111 case Intrinsic::objectsize:
3112 case Intrinsic::sadd_with_overflow:
3113 case Intrinsic::smul_with_overflow:
3114 case Intrinsic::ssub_with_overflow:
3115 case Intrinsic::uadd_with_overflow:
3116 case Intrinsic::umul_with_overflow:
3117 case Intrinsic::usub_with_overflow:
3118 return true;
3119 // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
3120 // errno like libm sqrt would.
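      // (Illustrative aside, not from the original comments: a libm call such
      // as sqrt(-1.0) is allowed to set errno to EDOM, so it cannot be blindly
      // speculated, whereas the intrinsic form has no such side effect.)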
3121 case Intrinsic::sqrt: 3122 case Intrinsic::fma: 3123 case Intrinsic::fmuladd: 3124 case Intrinsic::fabs: 3125 case Intrinsic::minnum: 3126 case Intrinsic::maxnum: 3127 return true; 3128 // TODO: some fp intrinsics are marked as having the same error handling 3129 // as libm. They're safe to speculate when they won't error. 3130 // TODO: are convert_{from,to}_fp16 safe? 3131 // TODO: can we list target-specific intrinsics here? 3132 default: break; 3133 } 3134 } 3135 return false; // The called function could have undefined behavior or 3136 // side-effects, even if marked readnone nounwind. 3137 } 3138 case Instruction::VAArg: 3139 case Instruction::Alloca: 3140 case Instruction::Invoke: 3141 case Instruction::PHI: 3142 case Instruction::Store: 3143 case Instruction::Ret: 3144 case Instruction::Br: 3145 case Instruction::IndirectBr: 3146 case Instruction::Switch: 3147 case Instruction::Unreachable: 3148 case Instruction::Fence: 3149 case Instruction::LandingPad: 3150 case Instruction::AtomicRMW: 3151 case Instruction::AtomicCmpXchg: 3152 case Instruction::Resume: 3153 return false; // Misc instructions which have effects 3154 } 3155 } 3156 3157 /// Return true if we know that the specified value is never null. 3158 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) { 3159 // Alloca never returns null, malloc might. 3160 if (isa<AllocaInst>(V)) return true; 3161 3162 // A byval, inalloca, or nonnull argument is never null. 3163 if (const Argument *A = dyn_cast<Argument>(V)) 3164 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr(); 3165 3166 // Global values are not null unless extern weak. 3167 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) 3168 return !GV->hasExternalWeakLinkage(); 3169 3170 // A Load tagged w/nonnull metadata is never null. 3171 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 3172 return LI->getMetadata(LLVMContext::MD_nonnull); 3173 3174 if (auto CS = ImmutableCallSite(V)) 3175 if (CS.isReturnNonNull()) 3176 return true; 3177 3178 // operator new never returns null. 
3179 if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true)) 3180 return true; 3181 3182 return false; 3183 } 3184 3185 static bool isKnownNonNullFromDominatingCondition(const Value *V, 3186 const Instruction *CtxI, 3187 const DominatorTree *DT) { 3188 unsigned NumUsesExplored = 0; 3189 for (auto U : V->users()) { 3190 // Avoid massive lists 3191 if (NumUsesExplored >= DomConditionsMaxUses) 3192 break; 3193 NumUsesExplored++; 3194 // Consider only compare instructions uniquely controlling a branch 3195 const ICmpInst *Cmp = dyn_cast<ICmpInst>(U); 3196 if (!Cmp) 3197 continue; 3198 3199 if (DomConditionsSingleCmpUse && !Cmp->hasOneUse()) 3200 continue; 3201 3202 for (auto *CmpU : Cmp->users()) { 3203 const BranchInst *BI = dyn_cast<BranchInst>(CmpU); 3204 if (!BI) 3205 continue; 3206 3207 assert(BI->isConditional() && "uses a comparison!"); 3208 3209 BasicBlock *NonNullSuccessor = nullptr; 3210 CmpInst::Predicate Pred; 3211 3212 if (match(const_cast<ICmpInst*>(Cmp), 3213 m_c_ICmp(Pred, m_Specific(V), m_Zero()))) { 3214 if (Pred == ICmpInst::ICMP_EQ) 3215 NonNullSuccessor = BI->getSuccessor(1); 3216 else if (Pred == ICmpInst::ICMP_NE) 3217 NonNullSuccessor = BI->getSuccessor(0); 3218 } 3219 3220 if (NonNullSuccessor) { 3221 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); 3222 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) 3223 return true; 3224 } 3225 } 3226 } 3227 3228 return false; 3229 } 3230 3231 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI, 3232 const DominatorTree *DT, const TargetLibraryInfo *TLI) { 3233 if (isKnownNonNull(V, TLI)) 3234 return true; 3235 3236 return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false; 3237 } 3238 3239 OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS, 3240 const DataLayout &DL, 3241 AssumptionCache *AC, 3242 const Instruction *CxtI, 3243 const DominatorTree *DT) { 3244 // Multiplying n * m significant bits yields a result of n + m significant 3245 // bits. If the total number of significant bits does not exceed the 3246 // result bit width (minus 1), there is no overflow. 3247 // This means if we have enough leading zero bits in the operands 3248 // we can guarantee that the result does not overflow. 3249 // Ref: "Hacker's Delight" by Henry Warren 3250 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3251 APInt LHSKnownZero(BitWidth, 0); 3252 APInt LHSKnownOne(BitWidth, 0); 3253 APInt RHSKnownZero(BitWidth, 0); 3254 APInt RHSKnownOne(BitWidth, 0); 3255 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI, 3256 DT); 3257 computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI, 3258 DT); 3259 // Note that underestimating the number of zero bits gives a more 3260 // conservative answer. 3261 unsigned ZeroBits = LHSKnownZero.countLeadingOnes() + 3262 RHSKnownZero.countLeadingOnes(); 3263 // First handle the easy case: if we have enough zero bits there's 3264 // definitely no overflow. 3265 if (ZeroBits >= BitWidth) 3266 return OverflowResult::NeverOverflows; 3267 3268 // Get the largest possible values for each operand. 3269 APInt LHSMax = ~LHSKnownZero; 3270 APInt RHSMax = ~RHSKnownZero; 3271 3272 // We know the multiply operation doesn't overflow if the maximum values for 3273 // each operand will not overflow after we multiply them together. 
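  // Illustrative 8-bit example (values assumed, not computed here): if the
  // known zero bits bound LHS by 0x11 and RHS by 0x0E, only 3 + 4 = 7 leading
  // zeros are known, yet 0x11 * 0x0E = 238 still fits in 8 bits, so the check
  // below proves NeverOverflows even though the one above could not.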
3274 bool MaxOverflow;
3275 LHSMax.umul_ov(RHSMax, MaxOverflow);
3276 if (!MaxOverflow)
3277 return OverflowResult::NeverOverflows;
3278 
3279 // We know it always overflows if multiplying the smallest possible values for
3280 // the operands also results in overflow.
3281 bool MinOverflow;
3282 LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3283 if (MinOverflow)
3284 return OverflowResult::AlwaysOverflows;
3285 
3286 return OverflowResult::MayOverflow;
3287 }
3288 
3289 OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
3290 const DataLayout &DL,
3291 AssumptionCache *AC,
3292 const Instruction *CxtI,
3293 const DominatorTree *DT) {
3294 bool LHSKnownNonNegative, LHSKnownNegative;
3295 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3296 AC, CxtI, DT);
3297 if (LHSKnownNonNegative || LHSKnownNegative) {
3298 bool RHSKnownNonNegative, RHSKnownNegative;
3299 ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3300 AC, CxtI, DT);
3301 
3302 if (LHSKnownNegative && RHSKnownNegative) {
3303 // The sign bit is set in both cases: this MUST overflow.
3304 // Both operands are at least 2^(BitWidth - 1), so their sum wraps.
3305 return OverflowResult::AlwaysOverflows;
3306 }
3307 
3308 if (LHSKnownNonNegative && RHSKnownNonNegative) {
3309 // The sign bit is clear in both cases: this CANNOT overflow.
3310 // Both operands are below 2^(BitWidth - 1), so their sum fits in BitWidth bits.
3311 return OverflowResult::NeverOverflows;
3312 }
3313 }
3314 
3315 return OverflowResult::MayOverflow;
3316 }
3317 
3318 static SelectPatternFlavor matchSelectPattern(ICmpInst::Predicate Pred,
3319 Value *CmpLHS, Value *CmpRHS,
3320 Value *TrueVal, Value *FalseVal,
3321 Value *&LHS, Value *&RHS) {
3322 LHS = CmpLHS;
3323 RHS = CmpRHS;
3324 
3325 // (icmp X, Y) ? X : Y
3326 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
3327 switch (Pred) {
3328 default: return SPF_UNKNOWN; // Equality.
3329 case ICmpInst::ICMP_UGT:
3330 case ICmpInst::ICMP_UGE: return SPF_UMAX;
3331 case ICmpInst::ICMP_SGT:
3332 case ICmpInst::ICMP_SGE: return SPF_SMAX;
3333 case ICmpInst::ICMP_ULT:
3334 case ICmpInst::ICMP_ULE: return SPF_UMIN;
3335 case ICmpInst::ICMP_SLT:
3336 case ICmpInst::ICMP_SLE: return SPF_SMIN;
3337 }
3338 }
3339 
3340 // (icmp X, Y) ? Y : X
3341 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
3342 switch (Pred) {
3343 default: return SPF_UNKNOWN; // Equality.
3344 case ICmpInst::ICMP_UGT:
3345 case ICmpInst::ICMP_UGE: return SPF_UMIN;
3346 case ICmpInst::ICMP_SGT:
3347 case ICmpInst::ICMP_SGE: return SPF_SMIN;
3348 case ICmpInst::ICMP_ULT:
3349 case ICmpInst::ICMP_ULE: return SPF_UMAX;
3350 case ICmpInst::ICMP_SLT:
3351 case ICmpInst::ICMP_SLE: return SPF_SMAX;
3352 }
3353 }
3354 
3355 if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) {
3356 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
3357 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
3358 
3359 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
3360 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
3361 if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) {
3362 return (CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS;
3363 }
3364 
3365 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
3366 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
3367 if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) {
3368 return (CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS;
3369 }
3370 }
3371 
3372 // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C)
3373 if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) {
3374 if (C1->getType() == C2->getType() && ~C1->getValue() == C2->getValue() &&
3375 (match(TrueVal, m_Not(m_Specific(CmpLHS))) ||
3376 match(CmpLHS, m_Not(m_Specific(TrueVal))))) {
3377 LHS = TrueVal;
3378 RHS = FalseVal;
3379 return SPF_SMIN;
3380 }
3381 }
3382 }
3383 
3384 // TODO: (X > 4) ? X : 5 --> (X >= 5) ? X : 5 --> MAX(X, 5)
3385 
3386 return SPF_UNKNOWN;
3387 }
3388 
3389 static Constant *lookThroughCast(ICmpInst *CmpI, Value *V1, Value *V2,
3390 Instruction::CastOps *CastOp) {
3391 CastInst *CI = dyn_cast<CastInst>(V1);
3392 Constant *C = dyn_cast<Constant>(V2);
3393 if (!CI || !C)
3394 return nullptr;
3395 *CastOp = CI->getOpcode();
3396 
3397 if (isa<SExtInst>(CI) && CmpI->isSigned()) {
3398 Constant *T = ConstantExpr::getTrunc(C, CI->getSrcTy());
3399 // This is only valid if the truncated value can be sign-extended
3400 // back to the original value.
3401 if (ConstantExpr::getSExt(T, C->getType()) == C)
3402 return T;
3403 return nullptr;
3404 }
3405 if (isa<ZExtInst>(CI) && CmpI->isUnsigned())
3406 return ConstantExpr::getTrunc(C, CI->getSrcTy());
3407 
3408 if (isa<TruncInst>(CI))
3409 return ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned());
3410 
3411 return nullptr;
3412 }
3413 
3414 SelectPatternFlavor llvm::matchSelectPattern(Value *V,
3415 Value *&LHS, Value *&RHS,
3416 Instruction::CastOps *CastOp) {
3417 SelectInst *SI = dyn_cast<SelectInst>(V);
3418 if (!SI) return SPF_UNKNOWN;
3419 
3420 ICmpInst *CmpI = dyn_cast<ICmpInst>(SI->getCondition());
3421 if (!CmpI) return SPF_UNKNOWN;
3422 
3423 ICmpInst::Predicate Pred = CmpI->getPredicate();
3424 Value *CmpLHS = CmpI->getOperand(0);
3425 Value *CmpRHS = CmpI->getOperand(1);
3426 Value *TrueVal = SI->getTrueValue();
3427 Value *FalseVal = SI->getFalseValue();
3428 
3429 // Bail out early.
3430 if (CmpI->isEquality())
3431 return SPF_UNKNOWN;
3432 
3433 // Deal with type mismatches.
3434 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
3435 if (Constant *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
3436 return ::matchSelectPattern(Pred, CmpLHS, CmpRHS,
3437 cast<CastInst>(TrueVal)->getOperand(0), C,
3438 LHS, RHS);
3439 if (Constant *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
3440 return ::matchSelectPattern(Pred, CmpLHS, CmpRHS,
3441 C, cast<CastInst>(FalseVal)->getOperand(0),
3442 LHS, RHS);
3443 }
3444 return ::matchSelectPattern(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal,
3445 LHS, RHS);
3446 }
3447 
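// Illustrative use of matchSelectPattern (hypothetical IR, not from a test):
//   %c = icmp sgt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b
// is classified as SPF_SMAX with LHS = %a and RHS = %b.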