//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type (if unknown,
/// returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
  /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
  /// on.
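  ///
  /// An illustrative (hypothetical) IR fragment for the classic case:
  ///   %c = icmp eq i32 %x, %y
  ///   call void @llvm.assume(i1 %c)
  /// Querying known bits of %x consults the assume, which queries %y, which
  /// would consult the same assume about %x again; Excluded breaks the cycle.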
  std::array<const Value *, MaxDepth> Excluded;
  unsigned NumExcluded;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), NumExcluded(0) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};
} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT) {
  ::computeKnownBits(V, KnownZero, KnownOne, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
  APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}

static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                           unsigned Depth, const Query &Q);

void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
                   Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL, bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return NonNegative;
}

bool llvm::isKnownPositive(Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return Negative;
}

static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q);

unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
                                   APInt &KnownZero, APInt &KnownOne,
                                   APInt &KnownZero2, APInt &KnownOne2,
                                   unsigned Depth, const Query &Q) {
  if (!Add) {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
      if (!CLHS->getValue().isNegative()) {
        unsigned BitWidth = KnownZero.getBitWidth();
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit.
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
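          // Illustrative example (not from the original source): with i8 and
          // C = 20, NLZ of C+1 is 3, so MaskV covers the top 4 bits; if X's
          // top 4 bits are known zero (X < 16), then 20-X lies in [5, 20]
          // and the top NLZ2 = 3 bits of the result are zero.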
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
        }
      }
    }
  }

  unsigned BitWidth = KnownZero.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
  computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
  APInt CarryIn(BitWidth, 0);
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(KnownZero2, KnownOne2);
    CarryIn.setBit(0);
  }

  APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
  APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;

  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnown = LHSKnownZero | LHSKnownOne;
  APInt RHSKnown = KnownZero2 | KnownOne2;
  APInt CarryKnown = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnown & RHSKnown & CarryKnown;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");

  // Compute known bits of the result.
  KnownZero = ~PossibleSumOne & Known;
  KnownOne = PossibleSumOne & Known;

  // Are we still trying to solve for the sign bit?
  if (!Known.isNegative()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
        KnownZero |= APInt::getSignBit(BitWidth);
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
        KnownOne |= APInt::getSignBit(BitWidth);
    }
  }
}

static void computeKnownBitsMul(Value *Op0, Value *Op1, bool NSW,
                                APInt &KnownZero, APInt &KnownOne,
                                APInt &KnownZero2, APInt &KnownOne2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();
  computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
  computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
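      // Illustrative example (added for exposition): -3 * 2 == -6 is
      // negative, but -3 * 0 == 0 is not, so we must also prove the
      // non-negative operand nonzero before concluding the product is
      // negative.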
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
              APInt::getHighBitsSet(BitWidth, LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setBit(BitWidth - 1);
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setBit(BitWidth - 1);
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             APInt &KnownZero,
                                             APInt &KnownOne) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  KnownZero.setAllBits();
  KnownOne.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    KnownOne &= Range.getUnsignedMax() & Mask;
    KnownZero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (std::find(I->op_begin(), I->op_end(), E) != I->op_end())
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
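    // (Illustrative example: an icmp whose only user is the assume call
    // itself is ephemeral to that assume, as is anything feeding only that
    // icmp.)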
    if (std::all_of(V->user_begin(), V->user_end(),
                    [&](const User *U) { return EphValues.count(U); })) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J) {
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
        }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

static bool isValidAssumeForContext(Value *V, const Instruction *CxtI,
                                    const DominatorTree *DT) {
  Instruction *Inv = cast<Instruction>(V);

  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI)) {
      return true;
    } else if (Inv->getParent() == CxtI->getParent()) {
      // The context comes first, but they're both in the same block. Make sure
      // there is nothing in between that might interrupt the control flow.
      for (BasicBlock::const_iterator I =
             std::next(BasicBlock::const_iterator(CxtI)),
           IE(Inv); I != IE; ++I)
        if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
          return false;

      return !isEphemeralValueOf(Inv, CxtI);
    }

    return false;
  }

  // When we don't have a DT, we do a limited search...
  if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    return true;
  } else if (Inv->getParent() == CxtI->getParent()) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;

    // The context must come first...
    for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(CxtI)),
         IE(Inv); I != IE; ++I)
      if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
        return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *I,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  return ::isValidAssumeForContext(const_cast<Instruction *>(I), CxtI, DT);
}

static void computeKnownBitsFromAssume(Value *V, APInt &KnownZero,
                                       APInt &KnownOne, unsigned Depth,
                                       const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = KnownZero.getBitWidth();

  for (auto &AssumeVH : Q.AC->assumptions()) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      KnownZero.clearAllBits();
      KnownOne.setAllBits();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      KnownZero |= RHSKnownZero;
      KnownOne |= RHSKnownOne;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
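      // Illustrative example (not in the original): given
      // assume((v & 0xF0) == 0x50) on an i8 %v, the mask's known-one bits
      // are 0xF0 and the RHS is fully known, so bits 7:4 of %v become known
      // 0101 while bits 3:0 stay unknown.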
      KnownZero |= RHSKnownZero & MaskKnownOne;
      KnownOne |= RHSKnownOne & MaskKnownOne;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & MaskKnownOne;
      KnownOne |= RHSKnownZero & MaskKnownOne;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
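      // Illustrative example (not in the original): from
      // assume((v ^ 0x0F) == 0x23) on an i8 %v, B is fully known, so
      // v == 0x23 ^ 0x0F == 0x2C: the high nibble copies the RHS and the
      // low nibble copies the inverted RHS.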
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
      KnownZero |= RHSKnownOne & BKnownOne;
      KnownOne |= RHSKnownZero & BKnownOne;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
      KnownZero |= RHSKnownZero & BKnownOne;
      KnownOne |= RHSKnownOne & BKnownOne;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
      KnownOne |= RHSKnownOne.lshr(C->getZExtValue());
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
      KnownOne |= RHSKnownZero.lshr(C->getZExtValue());
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
                                                m_AShr(m_V, m_ConstantInt(C))),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      KnownZero |= RHSKnownZero << C->getZExtValue();
      KnownOne |= RHSKnownOne << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
                                             m_LShr(m_V, m_ConstantInt(C)),
                                             m_AShr(m_V, m_ConstantInt(C)))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
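      // Illustrative example (not in the original): from
      // assume((~(v >> 3)) == 0xFA) on an i8 %v, the RHS is fully known,
      // so v >> 3 must equal ~0xFA == 0x05, i.e. bits 7:3 of %v are 00101
      // while bits 2:0 stay unknown.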
      KnownZero |= RHSKnownOne << C->getZExtValue();
      KnownOne |= RHSKnownZero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne |= APInt::getSignBit(BitWidth);
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      KnownZero |=
        APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes()+1);
      else
        KnownZero |=
          APInt::getHighBitsSet(BitWidth, RHSKnownZero.countLeadingOnes());
    }
  }
}

// Compute known bits from a shift operator, including those with a
// non-constant shift amount. KnownZero and KnownOne are the outputs of this
// function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
// same bit width as KnownZero and KnownOne.
// KZF and KOF are operator-specific functors that, given the known-zero or
// known-one bits respectively, and a shift amount, compute the implied
// known-zero or known-one bits of the shift operator's result respectively
// for that shift amount. The results from calling KZF and KOF are
// conservatively combined for all permitted shift amounts.
template <typename KZFunctor, typename KOFunctor>
static void computeKnownBitsFromShiftOperator(Operator *I,
    APInt &KnownZero, APInt &KnownOne,
    APInt &KnownZero2, APInt &KnownOne2,
    unsigned Depth, const Query &Q, KZFunctor KZF, KOFunctor KOF) {
  unsigned BitWidth = KnownZero.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KZF(KnownZero, ShiftAmt);
    KnownOne = KOF(KnownOne, ShiftAmt);
    return;
  }

  computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);

  // Note: We cannot use KnownZero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  KnownZero.clearAllBits();
  KnownOne.clearAllBits();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

  KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    KnownZero &= KZF(KnownZero2, ShiftAmt);
    KnownOne &= KOF(KnownOne2, ShiftAmt);
  }

  // If there are no compatible shift amounts, then we've proven that the shift
  // amount must be >= the BitWidth, and the result is undefined. We could
  // return anything we'd like, but we need to make sure the sets of known bits
  // stay disjoint (it should be better for some other code to actually
  // propagate the undef than to pick a value here using known bits).
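  // Illustrative example (not in the original): for an i8 shift whose
  // amount has bit 2 known one and bit 0 known zero, only the amounts
  // {4, 6} survive the two filters in the loop above, so the result
  // combines KZF/KOF over just those shifts.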
  if ((KnownZero & KnownOne) != 0) {
    KnownZero.clearAllBits();
    KnownOne.clearAllBits();
  }
}

static void computeKnownBitsFromOperator(Operator *I, APInt &KnownZero,
                                         APInt &KnownOne, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS is zero, the result is zero.
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 bits are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *Y = nullptr;
    if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                      m_Value(Y))) ||
        match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                      m_Value(Y)))) {
      APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
      computeKnownBits(Y, KnownZero3, KnownOne3, Depth + 1, Q);
      if (KnownOne3.countTrailingOnes() > 0)
        KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
    }
    break;
  }
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 bits are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 bits are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
                        KnownOne, KnownZero2, KnownOne2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
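    // Illustrative example (not in the original): for an i8 udiv, if the
    // numerator has 2 known leading zeros and the denominator's top known
    // one is at bit 3 (so it is >= 8), the quotient gains 3 more leading
    // zeros, for 5 in total.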
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
    break;
  }
  case Instruction::Select:
    computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    break;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
  case Instruction::AddrSpaceCast: // Pointers could be different sizes.
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
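    // Illustrative example (not in the original): for sext i8 %x to i32,
    // if bit 7 of %x is known zero, bits 31:8 of the result are known zero;
    // if bit 7 is known one, bits 31:8 are known one.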
    if (KnownZero[SrcBitWidth-1])    // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1]) // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return (KnownZero << ShiftAmt) |
             APInt::getLowBitsSet(BitWidth, ShiftAmt); // Low bits known 0.
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne << ShiftAmt;
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::LShr: {
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return APIntOps::lshr(KnownZero, ShiftAmt) |
             // High bits known zero.
             APInt::getHighBitsSet(BitWidth, ShiftAmt);
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return APIntOps::lshr(KnownOne, ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return APIntOps::ashr(KnownZero, ShiftAmt);
    };

    auto KOF = [BitWidth](const APInt &KnownOne, unsigned ShiftAmt) {
      return APIntOps::ashr(KnownOne, ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                           Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                           Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
                         Q);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
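    // Illustrative example (not in the original): 7 srem 4 == 3 and
    // -7 srem 4 == -3, so the result's sign follows the LHS, except that
    // e.g. -8 srem 4 == 0 loses the sign; hence only a known non-negative
    // LHS lets us clear the result's sign bit below.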
    if (KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       Q);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setBit(BitWidth - 1);
    }

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
        KnownZero |= ~LowBits;
        KnownOne &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
    break;
  }

  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
                     Q);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.

        // Handle the case when the index is a vector zeroinitializer.
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ);
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
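    // (Illustrative, hypothetical IR: %i = phi i32 [ 0, %entry ],
    // [ %i.next, %loop ] with %i.next = add i32 %i, 4 keeps its two low
    // bits zero, which this case can prove.)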
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);

          // We need to take the minimum number of known bits.
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);

          KnownZero = APInt::getLowBitsSet(BitWidth,
                                           std::min(KnownZero2.countTrailingOnes(),
                                                    KnownZero3.countTrailingOnes()));
          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value references the PHI node itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getAllOnesValue(BitWidth);
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    // If range metadata is attached to this IntrinsicInst, intersect the
    // explicit range specified by the metadata and the implicit range of
    // the intrinsic.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        KnownZero |= KnownZero2.byteSwap();
        KnownOne |= KnownOne2.byteSwap();
        break;
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        // If this call is undefined for 0, the result will be less than 2^n.
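        // Illustrative example (not in the original): cttz.i32 returns at
        // most 32, which fits in Log2_32(32)+1 == 6 bits; if zero is
        // undefined behavior the result is at most 31, fitting in 5 bits.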
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          LowBits -= 1;
        KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
        unsigned LeadingZeros =
            APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
        assert(LeadingZeros <= BitWidth);
        KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
        KnownOne &= ~KnownZero;
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero |= APInt::getHighBitsSet(64, 32);
        break;
      }
    }
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, KnownZero,
                                 KnownOne, KnownZero2, KnownOne2, Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, KnownZero,
                                 KnownOne, KnownZero2, KnownOne2, Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                              Q);
          break;
        }
      }
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
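///
/// Illustrative example (not in the original): for the constant
/// <2 x i8> <i8 16, i8 48>, KnownOne is 0x10 (the only bit set in both
/// elements) and KnownZero is 0xCF (the bits clear in both), while bit 5
/// is unknown because the elements disagree.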
void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
                      unsigned Depth, const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = KnownZero.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarType()->isPointerTy()) &&
         "Not integer or pointer type!");
  assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, KnownOne and KnownZero should have same BitWidth");

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue();
    KnownZero = ~KnownOne;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    KnownOne.clearAllBits();
    KnownZero = APInt::getAllOnesValue(BitWidth);
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    KnownZero.setAllBits(); KnownOne.setAllBits();
    APInt Elt(KnownZero.getBitWidth(), 0);
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      Elt = CDS->getElementAsInteger(i);
      KnownZero &= ~Elt;
      KnownOne &= Elt;
    }
    return;
  }

  if (auto *CV = dyn_cast<ConstantVector>(V)) {
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    KnownZero.setAllBits(); KnownOne.setAllBits();
    APInt Elt(KnownZero.getBitWidth(), 0);
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        KnownZero.clearAllBits();
        KnownOne.clearAllBits();
        return;
      }
      Elt = ElementCI->getValue();
      KnownZero &= ~Elt;
      KnownOne &= Elt;
    }
    return;
  }

  // Start out not knowing anything.
  KnownZero.clearAllBits(); KnownOne.clearAllBits();

  // Limit search depth.
  // All recursive calls that increase depth must come after this.
  if (Depth == MaxDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable())
      computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q);
    return;
  }

  if (Operator *I = dyn_cast<Operator>(V))
    computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q);

  // Aligned pointers have trailing zeros - refine the KnownZero set.
  if (V->getType()->isPointerTy()) {
    unsigned Align = V->getPointerAlignment(Q.DL);
    if (Align)
      KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
  }

  // computeKnownBitsFromAssume strictly refines KnownZero and
  // KnownOne. Therefore, we run them after computeKnownBitsFromOperator.

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q);

  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
}

/// Determine whether the sign bit is known to be zero or one.
/// Convenience wrapper around computeKnownBits.
void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
                    unsigned Depth, const Query &Q) {
  unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
  if (!BitWidth) {
    KnownZero = false;
    KnownOne = false;
    return;
  }
  APInt ZeroBits(BitWidth, 0);
  APInt OneBits(BitWidth, 0);
  computeKnownBits(V, ZeroBits, OneBits, Depth, Q);
  KnownOne = OneBits[BitWidth - 1];
  KnownZero = ZeroBits[BitWidth - 1];
}

/// Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
                            const Query &Q) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return OrZero;

    const APInt *ConstIntOrConstSplatInt;
    if (match(C, m_APInt(ConstIntOrConstSplatInt)))
      return ConstIntOrConstSplatInt->isPowerOf2();
  }

  // 1 << X is clearly a power of two if the one is not shifted off the end.
  // If it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signbit) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
  if (match(V, m_LShr(m_SignBit(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  Value *X = nullptr, *Y = nullptr;
  // A shift left or a logical shift right of a power of two is a power of two
  // or zero.
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_LShr(m_Value(X), m_Value()))))
    return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);

  if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);

  if (SelectInst *SI = dyn_cast<SelectInst>(V))
    return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
           isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
      return true;
    // X & (-X) is always a power of two or zero.
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // Adding a power-of-two or zero to the same power-of-two or zero yields
  // either the original power-of-two, a larger power-of-two or zero.
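  // For example (illustrative): if %p is a power of two or zero, then in
  //   %m = and i8 %p, %q
  //   %s = add nuw i8 %m, %p
  // %m is either %p or 0, so %s is 2 * %p, %p, or 0 -- again a power of two
  // or zero.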
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
    if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
      if (match(X, m_And(m_Specific(Y), m_Value())) ||
          match(X, m_And(m_Value(), m_Specific(Y))))
        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
          return true;
      if (match(Y, m_And(m_Specific(X), m_Value())) ||
          match(Y, m_And(m_Value(), m_Specific(X))))
        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
          return true;

      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
      computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q);

      APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
      computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q);
      // If i8 V is a power of two or zero:
      //   ZeroBits: 1 1 1 0 1 1 1 1
      //  ~ZeroBits: 0 0 0 1 0 0 0 0
      if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
        // If OrZero isn't set, we cannot give back a zero result.
        // Make sure either the LHS or RHS has a bit set.
        if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
          return true;
    }
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
                                  Depth, Q);
  }

  return false;
}

/// \brief Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
static bool isGEPKnownNonNull(GEPOperator *GEP, unsigned Depth,
                              const Query &Q) {
  if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
    return true;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = Q.DL.getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't want
    // to recurse 10k times just because we have 10k GEP operands. We don't
    // bail completely out because we want to handle constant GEPs regardless
    // of depth.
    if (Depth++ >= MaxDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
      return true;
  }

  return false;
}

/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value?
static bool rangeMetadataExcludesValue(MDNode* Ranges, const APInt& Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}

/// Return true if the given value is known to be non-zero when defined.
/// For vectors return true if every element is known to be non-zero when
/// defined. Supports values with integer or pointer type and vectors of
/// integers.
bool isKnownNonZero(Value *V, unsigned Depth, const Query &Q) {
  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;

    // For constant vectors, check that all elements are undefined or known
    // non-zero to determine that the whole vector is known non-zero.
    if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
        Constant *Elt = C->getAggregateElement(i);
        if (!Elt || Elt->isNullValue())
          return false;
        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
          return false;
      }
      return true;
    }

    return false;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
      // If the possible ranges don't contain zero, then the value is
      // definitely non-zero.
      if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
        const APInt ZeroValue(Ty->getBitWidth(), 0);
        if (rangeMetadataExcludesValue(Ranges, ZeroValue))
          return true;
      }
    }
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  // Check for pointer simplifications.
  if (V->getType()->isPointerTy()) {
    if (isKnownNonNull(V))
      return true;
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (isGEPKnownNonNull(GEP, Depth, Q))
        return true;
  }

  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = nullptr, *Y = nullptr;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);

  // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (BO->hasNoUnsignedWrap())
      return isKnownNonZero(X, Depth, Q);

    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
    if (KnownOne[0])
      return true;
  }
  // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, Depth, Q);

    bool XKnownNonNegative, XKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
    if (XKnownNegative)
      return true;

    // If the shifter operand is a constant, and all of the bits shifted
    // out are known to be zero, and X is known non-zero then at least one
    // non-zero bit must remain.
    if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
      APInt KnownZero(BitWidth, 0);
      APInt KnownOne(BitWidth, 0);
      computeKnownBits(X, KnownZero, KnownOne, Depth, Q);

      auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
      // Is there a known one in the portion not shifted out?
      if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
        return true;
      // Are all the bits to be shifted out known zero?
      if (KnownZero.countTrailingOnes() >= ShiftVal)
        return isKnownNonZero(X, Depth, Q);
    }
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, Depth, Q);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    bool XKnownNonNegative, XKnownNegative;
    bool YKnownNonNegative, YKnownNegative;
    ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q);
    ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnownNonNegative && YKnownNonNegative)
      if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
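    // (For i8 that is the only wrapping case: (-128) + (-128) == 0.)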
    if (BitWidth && XKnownNegative && YKnownNegative) {
      APInt KnownZero(BitWidth, 0);
      APInt KnownOne(BitWidth, 0);
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set.  If some other bit is set then X is not
      // equal to INT_MIN.
      computeKnownBits(X, KnownZero, KnownOne, Depth, Q);
      if ((KnownOne & Mask) != 0)
        return true;
      // The sign bit of Y is set.  If some other bit is set then Y is not
      // equal to INT_MIN.
      computeKnownBits(Y, KnownZero, KnownOne, Depth, Q);
      if ((KnownOne & Mask) != 0)
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnownNonNegative &&
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
      return true;
    if (YKnownNonNegative &&
        isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
        isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
        isKnownNonZero(SI->getFalseValue(), Depth, Q))
      return true;
  }
  // PHI
  else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // Try to detect a recurrence that monotonically increases from a
    // starting value, as such recurrences are common induction variables.
    if (PN->getNumIncomingValues() == 2) {
      Value *Start = PN->getIncomingValue(0);
      Value *Induction = PN->getIncomingValue(1);
      if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
        std::swap(Start, Induction);
      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
        if (!C->isZero() && !C->isNegative()) {
          ConstantInt *X;
          if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
               match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
              !X->isNegative())
            return true;
        }
      }
    }
    // Check if all incoming values are non-zero constant.
    bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) {
      return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue();
    });
    if (AllNonZeroConstants)
      return true;
  }

  if (!BitWidth) return false;
  APInt KnownZero(BitWidth, 0);
  APInt KnownOne(BitWidth, 0);
  computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
  return KnownOne != 0;
}

/// Return true if V2 == V1 + X, where X is known non-zero.
static bool isAddOfNonZero(Value *V1, Value *V2, const Query &Q) {
  BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
  if (!BO || BO->getOpcode() != Instruction::Add)
    return false;
  Value *Op = nullptr;
  if (V2 == BO->getOperand(0))
    Op = BO->getOperand(1);
  else if (V2 == BO->getOperand(1))
    Op = BO->getOperand(0);
  else
    return false;
  return isKnownNonZero(Op, 0, Q);
}

/// Return true if it is known that V1 != V2.
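///
/// For example (illustrative IR): with
///   %nz = or i32 %x, 1        ; known non-zero
///   %b  = add i32 %a, %nz
/// %a and %b are known non-equal, since %b == %a + %nz and %nz != 0.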
static bool isKnownNonEqual(Value *V1, Value *V2, const Query &Q) {
  if (V1->getType()->isVectorTy() || V1 == V2)
    return false;
  if (V1->getType() != V2->getType())
    // We can't look through casts yet.
    return false;
  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
    return true;

  if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) {
    // Are any known bits in V1 contradictory to known bits in V2? If V1
    // has a known zero where V2 has a known one, they must not be equal.
    auto BitWidth = Ty->getBitWidth();
    APInt KnownZero1(BitWidth, 0);
    APInt KnownOne1(BitWidth, 0);
    computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q);
    APInt KnownZero2(BitWidth, 0);
    APInt KnownOne2(BitWidth, 0);
    computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q);

    auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1);
    if (OppositeBits.getBoolValue())
      return true;
  }
  return false;
}

/// Return true if 'V & Mask' is known to be zero.  We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth,
                       const Query &Q) {
  APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
  computeKnownBits(V, KnownZero, KnownOne, Depth, Q);
  return (KnownZero & Mask) == Mask;
}

/// For vector constants, loop over the elements and find the constant with the
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
static unsigned computeNumSignBitsVectorConstant(Value *V, unsigned TyBits) {
  auto *CV = dyn_cast<Constant>(V);
  if (!CV || !CV->getType()->isVectorTy())
    return 0;

  unsigned MinSignBits = TyBits;
  unsigned NumElts = CV->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    // If we find a non-ConstantInt, bail out.
    auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
    if (!Elt)
      return 0;

    // If the sign bit is 1, flip the bits, so we always count leading zeros.
    APInt EltVal = Elt->getValue();
    if (EltVal.isNegative())
      EltVal = ~EltVal;
    MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
  }

  return MinSignBits;
}

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign bit
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits.
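///
/// As another illustration, (sdiv i32 %x, 16) has at least
/// min(32, NumSignBits(%x) + 4) sign bits, since dividing by 16 adds
/// floor(log2(16)) == 4 sign bits.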
unsigned ComputeNumSignBits(Value *V, unsigned Depth, const Query &Q) {
  unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general computeKnownBits case
  // below.

  if (Depth == 6)
    return 1;  // Limit search depth.

  Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;

  case Instruction::SDiv: {
    const APInt *Denominator;
    // sdiv X, C -> adds log(C) sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits.
      unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Add floor(log(C)) bits to the numerator bits.
      return std::min(TyBits, NumBits + Denominator->logBase2());
    }
    break;
  }

  case Instruction::SRem: {
    const APInt *Denominator;
    // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant.  This lets us put a lower bound on the number of
    // sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits. SRem by a positive constant
      // can't lower the number of sign bits.
      unsigned NumrBits =
          ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Calculate the leading sign bit constraints by examining the
      // denominator.  Given that the denominator is positive, there are two
      // cases:
      //
      //  1. the numerator is positive.  The result range is [0,C) and [0,C) u<
      //     (1 << ceilLogBase2(C)).
      //
      //  2. the numerator is negative.  Then the result range is (-C,0] and
      //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
      //
      // Thus a lower bound on the number of sign bits is `TyBits -
      // ceilLogBase2(C)`.

      unsigned ResBits = TyBits - Denominator->ceilLogBase2();
      return std::max(NumrBits, ResBits);
    }
    break;
  }

  case Instruction::AShr: {
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    // ashr X, C   -> adds C sign bits.  Vectors too.
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      Tmp += ShAmt->getZExtValue();
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  }
  case Instruction::Shl: {
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      Tmp2 = ShAmt->getZExtValue();
      if (Tmp2 >= TyBits ||      // Bad shift.
          Tmp2 >= Tmp) break;    // Shifted all sign bits out.
      return Tmp - Tmp2;
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
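    // E.g. (illustrative) if %a has 8 known sign bits and %b has 3,
    // (and i32 %a, %b) still has at least 3.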
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select:
    Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
    return std::min(Tmp, Tmp2);

  case Instruction::Add:
    // Add can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit.  Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;

  case Instruction::PHI: {
    PHINode *PN = cast<PHINode>(U);
    unsigned NumIncomingValues = PN->getNumIncomingValues();
    // Don't analyze large in-degree PHIs.
    if (NumIncomingValues > 4) break;
    // Unreachable blocks may have zero-operand PHI nodes.
    if (NumIncomingValues == 0) break;

    // Take the minimum of all incoming values.  This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
    for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(
          Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.

  // If we can examine all elements of a vector constant successfully, we're
  // done (we can't do any better than that). If not, keep trying.
  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
    return VecSignBits;

  APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
  computeKnownBits(V, KnownZero, KnownOne, Depth, Q);

  // If we know that the sign bit is either zero or one, determine the number
  // of identical bits in the top of the input value.
  if (KnownZero.isNegative())
    return std::max(FirstAnswer, KnownZero.countLeadingOnes());

  if (KnownOne.isNegative())
    return std::max(FirstAnswer, KnownOne.countLeadingOnes());

  // computeKnownBits gave us no extra information about the top bits.
  return FirstAnswer;
}

/// This function computes the integer multiple of Base that equals V.
/// If successful, it returns true and returns the multiple in
/// Multiple. If unsuccessful, it returns false. It looks
/// through SExt instructions only if LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    // Multiple is 1.
    Multiple = ConstantInt::get(T, 1);
    return true;
  }

  if (CI && CI->getZExtValue() % Base == 0) {
    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
    return true;
  }

  if (Depth == MaxDepth) return false;  // Limit search depth.
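  // From here on, V is decomposed through sext/zext, shl, and mul.
  // E.g. (illustrative) for V = (mul i32 %x, 4) and Base == 4, Multiple
  // becomes %x.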

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // otherwise fall through to ZExt
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth+1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }

    Value *Mul0 = nullptr;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = nullptr;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0
          Multiple = Op0;
          return true;
        }
    }
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}

Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
                                            const TargetLibraryInfo *TLI) {
  const Function *F = ICS.getCalledFunction();
  if (!F)
    return Intrinsic::not_intrinsic;

  if (F->isIntrinsic())
    return F->getIntrinsicID();

  if (!TLI)
    return Intrinsic::not_intrinsic;

  LibFunc::Func Func;
  // We're going to make assumptions about the semantics of the functions, so
  // check that the target knows the function is available in this environment
  // and that it does not have local linkage.
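  // (E.g. a static helper that merely happens to be named "sin" must not be
  // treated as the libm sin.)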
  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
    return Intrinsic::not_intrinsic;

  if (!ICS.onlyReadsMemory())
    return Intrinsic::not_intrinsic;

  // Otherwise check if we have a call to a function that can be turned into a
  // vector intrinsic.
  switch (Func) {
  default:
    break;
  case LibFunc::sin:
  case LibFunc::sinf:
  case LibFunc::sinl:
    return Intrinsic::sin;
  case LibFunc::cos:
  case LibFunc::cosf:
  case LibFunc::cosl:
    return Intrinsic::cos;
  case LibFunc::exp:
  case LibFunc::expf:
  case LibFunc::expl:
    return Intrinsic::exp;
  case LibFunc::exp2:
  case LibFunc::exp2f:
  case LibFunc::exp2l:
    return Intrinsic::exp2;
  case LibFunc::log:
  case LibFunc::logf:
  case LibFunc::logl:
    return Intrinsic::log;
  case LibFunc::log10:
  case LibFunc::log10f:
  case LibFunc::log10l:
    return Intrinsic::log10;
  case LibFunc::log2:
  case LibFunc::log2f:
  case LibFunc::log2l:
    return Intrinsic::log2;
  case LibFunc::fabs:
  case LibFunc::fabsf:
  case LibFunc::fabsl:
    return Intrinsic::fabs;
  case LibFunc::fmin:
  case LibFunc::fminf:
  case LibFunc::fminl:
    return Intrinsic::minnum;
  case LibFunc::fmax:
  case LibFunc::fmaxf:
  case LibFunc::fmaxl:
    return Intrinsic::maxnum;
  case LibFunc::copysign:
  case LibFunc::copysignf:
  case LibFunc::copysignl:
    return Intrinsic::copysign;
  case LibFunc::floor:
  case LibFunc::floorf:
  case LibFunc::floorl:
    return Intrinsic::floor;
  case LibFunc::ceil:
  case LibFunc::ceilf:
  case LibFunc::ceill:
    return Intrinsic::ceil;
  case LibFunc::trunc:
  case LibFunc::truncf:
  case LibFunc::truncl:
    return Intrinsic::trunc;
  case LibFunc::rint:
  case LibFunc::rintf:
  case LibFunc::rintl:
    return Intrinsic::rint;
  case LibFunc::nearbyint:
  case LibFunc::nearbyintf:
  case LibFunc::nearbyintl:
    return Intrinsic::nearbyint;
  case LibFunc::round:
  case LibFunc::roundf:
  case LibFunc::roundl:
    return Intrinsic::round;
  case LibFunc::pow:
  case LibFunc::powf:
  case LibFunc::powl:
    return Intrinsic::pow;
  case LibFunc::sqrt:
  case LibFunc::sqrtf:
  case LibFunc::sqrtl:
    if (ICS->hasNoNaNs())
      return Intrinsic::sqrt;
    return Intrinsic::not_intrinsic;
  }

  return Intrinsic::not_intrinsic;
}

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
///
bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                                unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  // FIXME: Magic number! At the least, this should be given a name because
  // it's used similarly in CannotBeOrderedLessThanZero(). A better fix may be
  // to expose it as a parameter, so it can be used for testing /
  // experimenting.
  if (Depth == 6)
    return false;  // Limit search depth.
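  // E.g. (illustrative) (fadd float %x, 0.0) cannot be -0.0: even when %x is
  // -0.0, (-0.0) + (+0.0) is +0.0 under the default rounding mode.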

  const Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  // Check if the nsz fast-math flag is set
  if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
    if (FPO->hasNoSignedZeros())
      return true;

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (I->getOpcode() == Instruction::FAdd)
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
      if (CFP->isNullValue())
        return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I)) {
    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
    switch (IID) {
    default:
      break;
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    case Intrinsic::sqrt:
      return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
    // fabs(x) != -0.0
    case Intrinsic::fabs:
      return true;
    }
  }

  return false;
}

bool llvm::CannotBeOrderedLessThanZero(const Value *V,
                                       const TargetLibraryInfo *TLI,
                                       unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();

  // FIXME: Magic number! At the least, this should be given a name because
  // it's used similarly in CannotBeNegativeZero(). A better fix may be to
  // expose it as a parameter, so it can be used for testing / experimenting.
  if (Depth == 6)
    return false;  // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  // Unsigned integers are always nonnegative.
  case Instruction::UIToFP:
    return true;
  case Instruction::FMul:
    // x*x is always non-negative or a NaN.
    if (I->getOperand(0) == I->getOperand(1))
      return true;
    // Fall through
  case Instruction::FAdd:
  case Instruction::FDiv:
  case Instruction::FRem:
    return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
           CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
  case Instruction::Select:
    return CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1) &&
           CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
  case Instruction::FPExt:
  case Instruction::FPTrunc:
    // Widening/narrowing never change sign.
    return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
  case Instruction::Call:
    Intrinsic::ID IID = getIntrinsicForCallSite(cast<CallInst>(I), TLI);
    switch (IID) {
    default:
      break;
    case Intrinsic::maxnum:
      return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) ||
             CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
    case Intrinsic::minnum:
      return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
             CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::fabs:
    case Intrinsic::sqrt:
      return true;
    case Intrinsic::powi:
      if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // powi(x,n) is non-negative if n is even.
        if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
          return true;
      }
      return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
    case Intrinsic::fma:
    case Intrinsic::fmuladd:
      // x*x+y is non-negative if y is non-negative.
      return I->getOperand(0) == I->getOperand(1) &&
             CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
    }
    break;
  }
  return false;
}

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
Value *llvm::isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Handle 'null' ConstantArrayZero etc.
  if (Constant *C = dyn_cast<Constant>(V))
    if (C->isNullValue())
      return Constant::getNullValue(Type::getInt8Ty(V->getContext()));

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getBitWidth() % 8 == 0) {
      assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");

      if (!CI->getValue().isSplat(8))
        return nullptr;
      return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
    }
  }

  // A ConstantDataArray/Vector is splatable if all its members are equal and
  // also splatable.
  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
    Value *Elt = CA->getElementAsConstant(0);
    Value *Val = isBytewiseValue(Elt);
    if (!Val)
      return nullptr;

    for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
      if (CA->getElementAsConstant(I) != Elt)
        return nullptr;

    return Val;
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return nullptr;
}


// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
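//
// For example (illustrative), with From of type { a, { b, { c, d }, e } },
// Idxs == {1, 1} and IdxSkip == 2, the recursion extends Idxs to {1, 1, 0}
// and then {1, 1, 1} to locate each leaf in From, and inserts each leaf into
// To using only the indices past the first two ({0}, then {1}).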
static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
                                SmallVectorImpl<unsigned> &Idxs,
                                unsigned IdxSkip,
                                Instruction *InsertBefore) {
  llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Cleanup
        while (PrevTo != OrigTo) {
          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates
    if (To)
      return To;
  }
  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
  // the struct's elements had a value that was inserted directly. In the
  // latter case, perhaps we can't determine each of the subelements
  // individually, but we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot
  Value *V = FindInsertedValue(From, Idxs);

  if (!V)
    return nullptr;

  // Insert the value in the new (sub) aggregate
  return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
                                       "tmp", InsertBefore);
}

// This helper takes a nested struct and extracts a part of it (which is again
// a struct) into a new value. For example, given the struct:
//   { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
//   { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct are known (ie, inserted into From by
// an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}

/// Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if
/// it were inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (!C) return nullptr;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }

  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop the indices for the insertvalue instruction in parallel with the
    // requested indices
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues
        if (!InsertBefore)
          return nullptr;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially. For example,
        //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue {i32, { i32, i32 } } %B, 1
        // This can be changed into
        //   %A = insertvalue {i32, i32 } undef, i32 10, 0
        //   %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (such as, extracting from a function return value
  // or load instruction)
  return nullptr;
}

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
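///
/// For example (illustrative), for
///   %p = getelementptr inbounds i32, i32* %base, i64 3
/// this returns %base and sets Offset to 12, assuming i32 is four bytes wide
/// for the given DataLayout.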
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                              const DataLayout &DL) {
  unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
  APInt ByteOffset(BitWidth, 0);

  // We walk up the defs but use a visited set to handle unreachable code. In
  // that case, we stop after accumulating the cycle once (not that it
  // matters).
  SmallPtrSet<Value *, 16> Visited;
  while (Visited.insert(Ptr).second) {
    if (Ptr->getType()->isVectorTy())
      break;

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      APInt GEPOffset(BitWidth, 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;

      ByteOffset += GEPOffset;

      Ptr = GEP->getPointerOperand();
    } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
               Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->isInterposable())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
  }
  Offset = ByteOffset.getSExtValue();
  return Ptr;
}

bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) {
  // Make sure the GEP has exactly three arguments.
  if (GEP->getNumOperands() != 3)
    return false;

  // Make sure the indexee is a pointer to an array of i8.
  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
  if (!AT || !AT->getElementType()->isIntegerTy(8))
    return false;

  // Check to make sure that the first operand of the GEP is an integer and
  // has value 0 so that we are sure we're indexing into the initializer.
  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!FirstIdx || !FirstIdx->isZero())
    return false;

  return true;
}

/// This function extracts the constant, null-terminated string that V points
/// to. If successful, it returns true and returns the string in Str.
/// If unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 uint64_t Offset, bool TrimAtNul) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // The GEP operator should be based on a pointer to string constant, and is
    // indexing into the string constant.
    if (!isGEPBasedOnPointerToString(GEP))
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset,
                                 TrimAtNul);
  }

  // The GEP instruction, constant or instruction, must reference a global
  // variable that is a constant and is initialized. The referenced constant
  // initializer is the array that we'll use for optimization.
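  // For example (illustrative):
  //   @str = private unnamed_addr constant [4 x i8] c"abc\00"
  // With Offset == 1 and TrimAtNul == true, Str becomes "bc".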
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  // Handle the all-zeros case.
  if (GV->getInitializer()->isNullValue()) {
    // This is a degenerate case. The initializer is constant zero so the
    // length of the string must be zero.
    Str = "";
    return true;
  }

  // This must be a ConstantDataArray.
  const auto *Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
  if (!Array || !Array->isString())
    return false;

  // Get the number of elements in the array.
  uint64_t NumElts = Array->getType()->getArrayNumElements();

  // Start out with the entire array in the StringRef.
  Str = Array->getAsString();

  if (Offset > NumElts)
    return false;

  // Skip over 'offset' bytes.
  Str = Str.substr(Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we just return the whole end of string. The client may know
    // some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}

// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'.  If we can't, return 0.
static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN).second)
      return ~0ULL;  // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (Value *IncValue : PN->incoming_values()) {
      uint64_t Len = GetStringLengthH(IncValue, PHIs);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0;    // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
  if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  StringRef StrData;
  if (!getConstantStringInfo(V, StrData))
    return 0;

  return StrData.size()+1;
}

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'.  If we can't, return 0.
uint64_t llvm::GetStringLength(Value *V) {
  if (!V->getType()->isPointerTy()) return 0;

  SmallPtrSet<PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
  // return 1 (the length of an empty string) instead.
  return Len == ~0ULL ? 1 : Len;
}

/// \brief \p PN defines a loop-variant pointer to an object.  Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(PHINode *PN, LoopInfo *LI) {
  // Find the loop-defined value.
  Loop *L = LI->getLoopFor(PN->getParent());
  if (PN->getNumIncomingValues() != 2)
    return true;

  // Find the value from previous iteration.
  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    return true;

  // If a new pointer is loaded in the loop, the pointer references a different
  // object in every iteration.  E.g.:
  //    for (i)
  //       int *p = a[i];
  //       ...
  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
    if (!L->isLoopInvariant(Load->getPointerOperand()))
      return false;
  return true;
}

Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
                                 unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        return V;
      V = GA->getAliasee();
    } else {
      // See if InstructionSimplify knows any relevant tricks.
      if (Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Acquire a DominatorTree and AssumptionCache and use them.
        if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
          V = Simplified;
          continue;
        }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}

void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
                                const DataLayout &DL, LoopInfo *LI,
                                unsigned MaxLookup) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, DL, MaxLookup);

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      // If this PHI changes the underlying object in every iteration of the
      // loop, don't look through it.  Consider:
      //   int **A;
      //   for (i) {
      //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
      //     Curr = A[i];
      //     *Prev, *Curr;
      //
      // Prev is tracking Curr one iteration behind so they refer to different
      // underlying objects.
      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
          isSameUnderlyingObjectInLoop(PN, LI))
        for (Value *IncValue : PN->incoming_values())
          Worklist.push_back(IncValue);
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}

/// Return true if the only users of this pointer are lifetime markers.
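/// E.g. (illustrative) this holds for %p when its only uses are
///   call void @llvm.lifetime.start(i64 16, i8* %p)
///   call void @llvm.lifetime.end(i64 16, i8* %p)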
3041 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 3042 for (const User *U : V->users()) { 3043 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3044 if (!II) return false; 3045 3046 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 3047 II->getIntrinsicID() != Intrinsic::lifetime_end) 3048 return false; 3049 } 3050 return true; 3051 } 3052 3053 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 3054 const Instruction *CtxI, 3055 const DominatorTree *DT, 3056 const TargetLibraryInfo *TLI) { 3057 const Operator *Inst = dyn_cast<Operator>(V); 3058 if (!Inst) 3059 return false; 3060 3061 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 3062 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 3063 if (C->canTrap()) 3064 return false; 3065 3066 switch (Inst->getOpcode()) { 3067 default: 3068 return true; 3069 case Instruction::UDiv: 3070 case Instruction::URem: { 3071 // x / y is undefined if y == 0. 3072 const APInt *V; 3073 if (match(Inst->getOperand(1), m_APInt(V))) 3074 return *V != 0; 3075 return false; 3076 } 3077 case Instruction::SDiv: 3078 case Instruction::SRem: { 3079 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 3080 const APInt *Numerator, *Denominator; 3081 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 3082 return false; 3083 // We cannot hoist this division if the denominator is 0. 3084 if (*Denominator == 0) 3085 return false; 3086 // It's safe to hoist if the denominator is not 0 or -1. 3087 if (*Denominator != -1) 3088 return true; 3089 // At this point we know that the denominator is -1. It is safe to hoist as 3090 // long we know that the numerator is not INT_MIN. 3091 if (match(Inst->getOperand(0), m_APInt(Numerator))) 3092 return !Numerator->isMinSignedValue(); 3093 // The numerator *might* be MinSignedValue. 3094 return false; 3095 } 3096 case Instruction::Load: { 3097 const LoadInst *LI = cast<LoadInst>(Inst); 3098 if (!LI->isUnordered() || 3099 // Speculative load may create a race that did not exist in the source. 3100 LI->getParent()->getParent()->hasFnAttribute( 3101 Attribute::SanitizeThread) || 3102 // Speculative load may load data from dirty regions. 3103 LI->getParent()->getParent()->hasFnAttribute( 3104 Attribute::SanitizeAddress)) 3105 return false; 3106 const DataLayout &DL = LI->getModule()->getDataLayout(); 3107 return isDereferenceableAndAlignedPointer( 3108 LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI); 3109 } 3110 case Instruction::Call: { 3111 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 3112 switch (II->getIntrinsicID()) { 3113 // These synthetic intrinsics have no side-effects and just mark 3114 // information about their operands. 3115 // FIXME: There are other no-op synthetic instructions that potentially 3116 // should be considered at least *safe* to speculate... 3117 case Intrinsic::dbg_declare: 3118 case Intrinsic::dbg_value: 3119 return true; 3120 3121 case Intrinsic::bswap: 3122 case Intrinsic::ctlz: 3123 case Intrinsic::ctpop: 3124 case Intrinsic::cttz: 3125 case Intrinsic::objectsize: 3126 case Intrinsic::sadd_with_overflow: 3127 case Intrinsic::smul_with_overflow: 3128 case Intrinsic::ssub_with_overflow: 3129 case Intrinsic::uadd_with_overflow: 3130 case Intrinsic::umul_with_overflow: 3131 case Intrinsic::usub_with_overflow: 3132 return true; 3133 // These intrinsics are defined to have the same behavior as libm 3134 // functions except for setting errno. 
3135 case Intrinsic::sqrt: 3136 case Intrinsic::fma: 3137 case Intrinsic::fmuladd: 3138 return true; 3139 // These intrinsics are defined to have the same behavior as libm 3140 // functions, and the corresponding libm functions never set errno. 3141 case Intrinsic::trunc: 3142 case Intrinsic::copysign: 3143 case Intrinsic::fabs: 3144 case Intrinsic::minnum: 3145 case Intrinsic::maxnum: 3146 return true; 3147 // These intrinsics are defined to have the same behavior as libm 3148 // functions, which never overflow when operating on the IEEE754 types 3149 // that we support, and never set errno otherwise. 3150 case Intrinsic::ceil: 3151 case Intrinsic::floor: 3152 case Intrinsic::nearbyint: 3153 case Intrinsic::rint: 3154 case Intrinsic::round: 3155 return true; 3156 // TODO: are convert_{from,to}_fp16 safe? 3157 // TODO: can we list target-specific intrinsics here? 3158 default: break; 3159 } 3160 } 3161 return false; // The called function could have undefined behavior or 3162 // side-effects, even if marked readnone nounwind. 3163 } 3164 case Instruction::VAArg: 3165 case Instruction::Alloca: 3166 case Instruction::Invoke: 3167 case Instruction::PHI: 3168 case Instruction::Store: 3169 case Instruction::Ret: 3170 case Instruction::Br: 3171 case Instruction::IndirectBr: 3172 case Instruction::Switch: 3173 case Instruction::Unreachable: 3174 case Instruction::Fence: 3175 case Instruction::AtomicRMW: 3176 case Instruction::AtomicCmpXchg: 3177 case Instruction::LandingPad: 3178 case Instruction::Resume: 3179 case Instruction::CatchSwitch: 3180 case Instruction::CatchPad: 3181 case Instruction::CatchRet: 3182 case Instruction::CleanupPad: 3183 case Instruction::CleanupRet: 3184 return false; // Misc instructions which have effects 3185 } 3186 } 3187 3188 bool llvm::mayBeMemoryDependent(const Instruction &I) { 3189 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 3190 } 3191 3192 /// Return true if we know that the specified value is never null. 3193 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) { 3194 assert(V->getType()->isPointerTy() && "V must be pointer type"); 3195 3196 // Alloca never returns null, malloc might. 3197 if (isa<AllocaInst>(V)) return true; 3198 3199 // A byval, inalloca, or nonnull argument is never null. 3200 if (const Argument *A = dyn_cast<Argument>(V)) 3201 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr(); 3202 3203 // A global variable in address space 0 is non null unless extern weak. 3204 // Other address spaces may have null as a valid address for a global, 3205 // so we can't assume anything. 3206 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) 3207 return !GV->hasExternalWeakLinkage() && 3208 GV->getType()->getAddressSpace() == 0; 3209 3210 // A Load tagged with nonnull metadata is never null. 
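  // For example (hypothetical IR; the names are illustrative):
  //   %p = load i8*, i8** %pp, !nonnull !0
  // Here the frontend has promised that the loaded pointer is not null, so we
  // can answer true without looking at %pp at all.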
3211 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 3212 return LI->getMetadata(LLVMContext::MD_nonnull); 3213 3214 if (auto CS = ImmutableCallSite(V)) 3215 if (CS.isReturnNonNull()) 3216 return true; 3217 3218 return false; 3219 } 3220 3221 static bool isKnownNonNullFromDominatingCondition(const Value *V, 3222 const Instruction *CtxI, 3223 const DominatorTree *DT) { 3224 assert(V->getType()->isPointerTy() && "V must be pointer type"); 3225 3226 unsigned NumUsesExplored = 0; 3227 for (auto *U : V->users()) { 3228 // Avoid massive lists 3229 if (NumUsesExplored >= DomConditionsMaxUses) 3230 break; 3231 NumUsesExplored++; 3232 // Consider only compare instructions uniquely controlling a branch 3233 CmpInst::Predicate Pred; 3234 if (!match(const_cast<User *>(U), 3235 m_c_ICmp(Pred, m_Specific(V), m_Zero())) || 3236 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)) 3237 continue; 3238 3239 for (auto *CmpU : U->users()) { 3240 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) { 3241 assert(BI->isConditional() && "uses a comparison!"); 3242 3243 BasicBlock *NonNullSuccessor = 3244 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0); 3245 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); 3246 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) 3247 return true; 3248 } else if (Pred == ICmpInst::ICMP_NE && 3249 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) && 3250 DT->dominates(cast<Instruction>(CmpU), CtxI)) { 3251 return true; 3252 } 3253 } 3254 } 3255 3256 return false; 3257 } 3258 3259 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI, 3260 const DominatorTree *DT, const TargetLibraryInfo *TLI) { 3261 if (isKnownNonNull(V, TLI)) 3262 return true; 3263 3264 return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false; 3265 } 3266 3267 OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS, 3268 const DataLayout &DL, 3269 AssumptionCache *AC, 3270 const Instruction *CxtI, 3271 const DominatorTree *DT) { 3272 // Multiplying n * m significant bits yields a result of n + m significant 3273 // bits. If the total number of significant bits does not exceed the 3274 // result bit width (minus 1), there is no overflow. 3275 // This means if we have enough leading zero bits in the operands 3276 // we can guarantee that the result does not overflow. 3277 // Ref: "Hacker's Delight" by Henry Warren 3278 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3279 APInt LHSKnownZero(BitWidth, 0); 3280 APInt LHSKnownOne(BitWidth, 0); 3281 APInt RHSKnownZero(BitWidth, 0); 3282 APInt RHSKnownOne(BitWidth, 0); 3283 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI, 3284 DT); 3285 computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI, 3286 DT); 3287 // Note that underestimating the number of zero bits gives a more 3288 // conservative answer. 3289 unsigned ZeroBits = LHSKnownZero.countLeadingOnes() + 3290 RHSKnownZero.countLeadingOnes(); 3291 // First handle the easy case: if we have enough zero bits there's 3292 // definitely no overflow. 3293 if (ZeroBits >= BitWidth) 3294 return OverflowResult::NeverOverflows; 3295 3296 // Get the largest possible values for each operand. 3297 APInt LHSMax = ~LHSKnownZero; 3298 APInt RHSMax = ~RHSKnownZero; 3299 3300 // We know the multiply operation doesn't overflow if the maximum values for 3301 // each operand will not overflow after we multiply them together. 
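  // (APInt::umul_ov returns the product and sets the flag if the full result
  // did not fit in BitWidth bits.) As an illustrative example: with 8-bit
  // operands whose top three bits are each known zero, both maxima are 31,
  // and 31 * 31 = 961 does not fit in 8 bits, so MaxOverflow is set and we
  // cannot conclude NeverOverflows on this path.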
3302   bool MaxOverflow;
3303   LHSMax.umul_ov(RHSMax, MaxOverflow);
3304   if (!MaxOverflow)
3305     return OverflowResult::NeverOverflows;
3306
3307   // We know it always overflows if multiplying the smallest possible values for
3308   // the operands also results in overflow.
3309   bool MinOverflow;
3310   LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow);
3311   if (MinOverflow)
3312     return OverflowResult::AlwaysOverflows;
3313
3314   return OverflowResult::MayOverflow;
3315 }
3316
3317 OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
3318                                                    const DataLayout &DL,
3319                                                    AssumptionCache *AC,
3320                                                    const Instruction *CxtI,
3321                                                    const DominatorTree *DT) {
3322   bool LHSKnownNonNegative, LHSKnownNegative;
3323   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3324                  AC, CxtI, DT);
3325   if (LHSKnownNonNegative || LHSKnownNegative) {
3326     bool RHSKnownNonNegative, RHSKnownNegative;
3327     ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3328                    AC, CxtI, DT);
3329
3330     if (LHSKnownNegative && RHSKnownNegative) {
3331       // The sign bit is set in both cases: this MUST overflow, since two
3332       // addends each >= 2^(BitWidth-1) sum to at least 2^BitWidth.
3333       return OverflowResult::AlwaysOverflows;
3334     }
3335
3336     if (LHSKnownNonNegative && RHSKnownNonNegative) {
3337       // The sign bit is clear in both cases: this CANNOT overflow, since two
3338       // addends each < 2^(BitWidth-1) sum to less than 2^BitWidth.
3339       return OverflowResult::NeverOverflows;
3340     }
3341   }
3342
3343   return OverflowResult::MayOverflow;
3344 }
3345
3346 static OverflowResult computeOverflowForSignedAdd(
3347     Value *LHS, Value *RHS, AddOperator *Add, const DataLayout &DL,
3348     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT) {
3349   if (Add && Add->hasNoSignedWrap()) {
3350     return OverflowResult::NeverOverflows;
3351   }
3352
3353   bool LHSKnownNonNegative, LHSKnownNegative;
3354   bool RHSKnownNonNegative, RHSKnownNegative;
3355   ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
3356                  AC, CxtI, DT);
3357   ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
3358                  AC, CxtI, DT);
3359
3360   if ((LHSKnownNonNegative && RHSKnownNegative) ||
3361       (LHSKnownNegative && RHSKnownNonNegative)) {
3362     // The sign bits are opposite: this CANNOT overflow.
3363     return OverflowResult::NeverOverflows;
3364   }
3365
3366   // The remaining code needs Add to be available. Return early if it is not.
3367   if (!Add)
3368     return OverflowResult::MayOverflow;
3369
3370   // If the sign of Add is the same as at least one of the operands, this add
3371   // CANNOT overflow. This is particularly useful when the sum is
3372   // @llvm.assume'ed non-negative rather than proved so from analyzing its
3373   // operands.
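  // The reasoning: a signed add wraps only when both operands have the same
  // sign and the sum's sign differs from it (positive + positive wrapping to
  // a negative value, or negative + negative wrapping to a non-negative one).
  // So a sum that provably shares its sign with either operand cannot have
  // wrapped.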
3374   bool LHSOrRHSKnownNonNegative =
3375       (LHSKnownNonNegative || RHSKnownNonNegative);
3376   bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative);
3377   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3378     bool AddKnownNonNegative, AddKnownNegative;
3379     ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL,
3380                    /*Depth=*/0, AC, CxtI, DT);
3381     if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) ||
3382         (AddKnownNegative && LHSOrRHSKnownNegative)) {
3383       return OverflowResult::NeverOverflows;
3384     }
3385   }
3386
3387   return OverflowResult::MayOverflow;
3388 }
3389
3390 bool llvm::isOverflowIntrinsicNoWrap(IntrinsicInst *II, DominatorTree &DT) {
3391 #ifndef NDEBUG
3392   auto IID = II->getIntrinsicID();
3393   assert((IID == Intrinsic::sadd_with_overflow ||
3394           IID == Intrinsic::uadd_with_overflow ||
3395           IID == Intrinsic::ssub_with_overflow ||
3396           IID == Intrinsic::usub_with_overflow ||
3397           IID == Intrinsic::smul_with_overflow ||
3398           IID == Intrinsic::umul_with_overflow) &&
3399          "Not an overflow intrinsic!");
3400 #endif
3401
3402   SmallVector<BranchInst *, 2> GuardingBranches;
3403   SmallVector<ExtractValueInst *, 2> Results;
3404
3405   for (User *U : II->users()) {
3406     if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3407       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3408
3409       if (EVI->getIndices()[0] == 0)
3410         Results.push_back(EVI);
3411       else {
3412         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3413
3414         for (auto *U : EVI->users())
3415           if (auto *B = dyn_cast<BranchInst>(U)) {
3416             assert(B->isConditional() && "How else is it using an i1?");
3417             GuardingBranches.push_back(B);
3418           }
3419       }
3420     } else {
3421       // We are using the aggregate directly in a way we don't want to analyze
3422       // here (storing it to a global, say).
3423       return false;
3424     }
3425   }
3426
3427   auto AllUsesGuardedByBranch = [&](BranchInst *BI) {
3428     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3429     if (!NoWrapEdge.isSingleEdge())
3430       return false;
3431
3432     // Check if all users of the add are provably no-wrap.
3433     for (auto *Result : Results) {
3434       // If the extractvalue itself is not executed on overflow, then we don't
3435       // need to check each use separately, since domination is transitive.
3436       if (DT.dominates(NoWrapEdge, Result->getParent()))
3437         continue;
3438
3439       for (auto &RU : Result->uses())
3440         if (!DT.dominates(NoWrapEdge, RU))
3441           return false;
3442     }
3443
3444     return true;
3445   };
3446
3447   return any_of(GuardingBranches, AllUsesGuardedByBranch);
3448 }
3449
3450
3451 OverflowResult llvm::computeOverflowForSignedAdd(AddOperator *Add,
3452                                                  const DataLayout &DL,
3453                                                  AssumptionCache *AC,
3454                                                  const Instruction *CxtI,
3455                                                  const DominatorTree *DT) {
3456   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3457                                        Add, DL, AC, CxtI, DT);
3458 }
3459
3460 OverflowResult llvm::computeOverflowForSignedAdd(Value *LHS, Value *RHS,
3461                                                  const DataLayout &DL,
3462                                                  AssumptionCache *AC,
3463                                                  const Instruction *CxtI,
3464                                                  const DominatorTree *DT) {
3465   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3466 }
3467
3468 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3469   // A memory operation returns normally if it isn't volatile. A volatile
3470   // operation is allowed to trap.
3471 // 3472 // An atomic operation isn't guaranteed to return in a reasonable amount of 3473 // time because it's possible for another thread to interfere with it for an 3474 // arbitrary length of time, but programs aren't allowed to rely on that. 3475 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) 3476 return !LI->isVolatile(); 3477 if (const StoreInst *SI = dyn_cast<StoreInst>(I)) 3478 return !SI->isVolatile(); 3479 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I)) 3480 return !CXI->isVolatile(); 3481 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) 3482 return !RMWI->isVolatile(); 3483 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I)) 3484 return !MII->isVolatile(); 3485 3486 // If there is no successor, then execution can't transfer to it. 3487 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I)) 3488 return !CRI->unwindsToCaller(); 3489 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) 3490 return !CatchSwitch->unwindsToCaller(); 3491 if (isa<ResumeInst>(I)) 3492 return false; 3493 if (isa<ReturnInst>(I)) 3494 return false; 3495 3496 // Calls can throw, or contain an infinite loop, or kill the process. 3497 if (CallSite CS = CallSite(const_cast<Instruction*>(I))) { 3498 // Calls which don't write to arbitrary memory are safe. 3499 // FIXME: Ignoring infinite loops without any side-effects is too aggressive, 3500 // but it's consistent with other passes. See http://llvm.org/PR965 . 3501 // FIXME: This isn't aggressive enough; a call which only writes to a 3502 // global is guaranteed to return. 3503 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() || 3504 match(I, m_Intrinsic<Intrinsic::assume>()); 3505 } 3506 3507 // Other instructions return normally. 3508 return true; 3509 } 3510 3511 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I, 3512 const Loop *L) { 3513 // The loop header is guaranteed to be executed for every iteration. 3514 // 3515 // FIXME: Relax this constraint to cover all basic blocks that are 3516 // guaranteed to be executed at every iteration. 3517 if (I->getParent() != L->getHeader()) return false; 3518 3519 for (const Instruction &LI : *L->getHeader()) { 3520 if (&LI == I) return true; 3521 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false; 3522 } 3523 llvm_unreachable("Instruction not contained in its own parent basic block."); 3524 } 3525 3526 bool llvm::propagatesFullPoison(const Instruction *I) { 3527 switch (I->getOpcode()) { 3528 case Instruction::Add: 3529 case Instruction::Sub: 3530 case Instruction::Xor: 3531 case Instruction::Trunc: 3532 case Instruction::BitCast: 3533 case Instruction::AddrSpaceCast: 3534 // These operations all propagate poison unconditionally. Note that poison 3535 // is not any particular value, so xor or subtraction of poison with 3536 // itself still yields poison, not zero. 3537 return true; 3538 3539 case Instruction::AShr: 3540 case Instruction::SExt: 3541 // For these operations, one bit of the input is replicated across 3542 // multiple output bits. A replicated poison bit is still poison. 3543 return true; 3544 3545 case Instruction::Shl: { 3546 // Left shift *by* a poison value is poison. The number of 3547 // positions to shift is unsigned, so no negative values are 3548 // possible there. Left shift by zero places preserves poison. So 3549 // it only remains to consider left shift of poison by a positive 3550 // number of places. 
3551 // 3552 // A left shift by a positive number of places leaves the lowest order bit 3553 // non-poisoned. However, if such a shift has a no-wrap flag, then we can 3554 // make the poison operand violate that flag, yielding a fresh full-poison 3555 // value. 3556 auto *OBO = cast<OverflowingBinaryOperator>(I); 3557 return OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap(); 3558 } 3559 3560 case Instruction::Mul: { 3561 // A multiplication by zero yields a non-poison zero result, so we need to 3562 // rule out zero as an operand. Conservatively, multiplication by a 3563 // non-zero constant is not multiplication by zero. 3564 // 3565 // Multiplication by a non-zero constant can leave some bits 3566 // non-poisoned. For example, a multiplication by 2 leaves the lowest 3567 // order bit unpoisoned. So we need to consider that. 3568 // 3569 // Multiplication by 1 preserves poison. If the multiplication has a 3570 // no-wrap flag, then we can make the poison operand violate that flag 3571 // when multiplied by any integer other than 0 and 1. 3572 auto *OBO = cast<OverflowingBinaryOperator>(I); 3573 if (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) { 3574 for (Value *V : OBO->operands()) { 3575 if (auto *CI = dyn_cast<ConstantInt>(V)) { 3576 // A ConstantInt cannot yield poison, so we can assume that it is 3577 // the other operand that is poison. 3578 return !CI->isZero(); 3579 } 3580 } 3581 } 3582 return false; 3583 } 3584 3585 case Instruction::ICmp: 3586 // Comparing poison with any value yields poison. This is why, for 3587 // instance, x s< (x +nsw 1) can be folded to true. 3588 return true; 3589 3590 case Instruction::GetElementPtr: 3591 // A GEP implicitly represents a sequence of additions, subtractions, 3592 // truncations, sign extensions and multiplications. The multiplications 3593 // are by the non-zero sizes of some set of types, so we do not have to be 3594 // concerned with multiplication by zero. If the GEP is in-bounds, then 3595 // these operations are implicitly no-signed-wrap so poison is propagated 3596 // by the arguments above for Add, Sub, Trunc, SExt and Mul. 3597 return cast<GEPOperator>(I)->isInBounds(); 3598 3599 default: 3600 return false; 3601 } 3602 } 3603 3604 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) { 3605 switch (I->getOpcode()) { 3606 case Instruction::Store: 3607 return cast<StoreInst>(I)->getPointerOperand(); 3608 3609 case Instruction::Load: 3610 return cast<LoadInst>(I)->getPointerOperand(); 3611 3612 case Instruction::AtomicCmpXchg: 3613 return cast<AtomicCmpXchgInst>(I)->getPointerOperand(); 3614 3615 case Instruction::AtomicRMW: 3616 return cast<AtomicRMWInst>(I)->getPointerOperand(); 3617 3618 case Instruction::UDiv: 3619 case Instruction::SDiv: 3620 case Instruction::URem: 3621 case Instruction::SRem: 3622 return I->getOperand(1); 3623 3624 default: 3625 return nullptr; 3626 } 3627 } 3628 3629 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) { 3630 // We currently only look for uses of poison values within the same basic 3631 // block, as that makes it easier to guarantee that the uses will be 3632 // executed given that PoisonI is executed. 3633 // 3634 // FIXME: Expand this to consider uses beyond the same basic block. To do 3635 // this, look out for the distinction between post-dominance and strong 3636 // post-dominance. 3637 const BasicBlock *BB = PoisonI->getParent(); 3638 3639 // Set of instructions that we have proved will yield poison if PoisonI 3640 // does. 
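  // The loop below walks forward from PoisonI through a chain of
  // single-successor blocks (visiting at most MaxDepth blocks), growing this
  // set via propagatesFullPoison, and returns true as soon as an instruction
  // that is guaranteed to execute uses a member of the set as a
  // guaranteed-non-poison operand.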
3641 SmallSet<const Value *, 16> YieldsPoison; 3642 SmallSet<const BasicBlock *, 4> Visited; 3643 YieldsPoison.insert(PoisonI); 3644 Visited.insert(PoisonI->getParent()); 3645 3646 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end(); 3647 3648 unsigned Iter = 0; 3649 while (Iter++ < MaxDepth) { 3650 for (auto &I : make_range(Begin, End)) { 3651 if (&I != PoisonI) { 3652 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I); 3653 if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) 3654 return true; 3655 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 3656 return false; 3657 } 3658 3659 // Mark poison that propagates from I through uses of I. 3660 if (YieldsPoison.count(&I)) { 3661 for (const User *User : I.users()) { 3662 const Instruction *UserI = cast<Instruction>(User); 3663 if (propagatesFullPoison(UserI)) 3664 YieldsPoison.insert(User); 3665 } 3666 } 3667 } 3668 3669 if (auto *NextBB = BB->getSingleSuccessor()) { 3670 if (Visited.insert(NextBB).second) { 3671 BB = NextBB; 3672 Begin = BB->getFirstNonPHI()->getIterator(); 3673 End = BB->end(); 3674 continue; 3675 } 3676 } 3677 3678 break; 3679 }; 3680 return false; 3681 } 3682 3683 static bool isKnownNonNaN(Value *V, FastMathFlags FMF) { 3684 if (FMF.noNaNs()) 3685 return true; 3686 3687 if (auto *C = dyn_cast<ConstantFP>(V)) 3688 return !C->isNaN(); 3689 return false; 3690 } 3691 3692 static bool isKnownNonZero(Value *V) { 3693 if (auto *C = dyn_cast<ConstantFP>(V)) 3694 return !C->isZero(); 3695 return false; 3696 } 3697 3698 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, 3699 FastMathFlags FMF, 3700 Value *CmpLHS, Value *CmpRHS, 3701 Value *TrueVal, Value *FalseVal, 3702 Value *&LHS, Value *&RHS) { 3703 LHS = CmpLHS; 3704 RHS = CmpRHS; 3705 3706 // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may 3707 // return inconsistent results between implementations. 3708 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0 3709 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) 3710 // Therefore we behave conservatively and only proceed if at least one of the 3711 // operands is known to not be zero, or if we don't care about signed zeroes. 3712 switch (Pred) { 3713 default: break; 3714 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE: 3715 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE: 3716 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && 3717 !isKnownNonZero(CmpRHS)) 3718 return {SPF_UNKNOWN, SPNB_NA, false}; 3719 } 3720 3721 SelectPatternNaNBehavior NaNBehavior = SPNB_NA; 3722 bool Ordered = false; 3723 3724 // When given one NaN and one non-NaN input: 3725 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. 3726 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the 3727 // ordered comparison fails), which could be NaN or non-NaN. 3728 // so here we discover exactly what NaN behavior is required/accepted. 3729 if (CmpInst::isFPPredicate(Pred)) { 3730 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); 3731 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); 3732 3733 if (LHSSafe && RHSSafe) { 3734 // Both operands are known non-NaN. 3735 NaNBehavior = SPNB_RETURNS_ANY; 3736 } else if (CmpInst::isOrdered(Pred)) { 3737 // An ordered comparison will return false when given a NaN, so it 3738 // returns the RHS. 3739 Ordered = true; 3740 if (LHSSafe) 3741 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 
3742 NaNBehavior = SPNB_RETURNS_NAN; 3743 else if (RHSSafe) 3744 NaNBehavior = SPNB_RETURNS_OTHER; 3745 else 3746 // Completely unsafe. 3747 return {SPF_UNKNOWN, SPNB_NA, false}; 3748 } else { 3749 Ordered = false; 3750 // An unordered comparison will return true when given a NaN, so it 3751 // returns the LHS. 3752 if (LHSSafe) 3753 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 3754 NaNBehavior = SPNB_RETURNS_OTHER; 3755 else if (RHSSafe) 3756 NaNBehavior = SPNB_RETURNS_NAN; 3757 else 3758 // Completely unsafe. 3759 return {SPF_UNKNOWN, SPNB_NA, false}; 3760 } 3761 } 3762 3763 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 3764 std::swap(CmpLHS, CmpRHS); 3765 Pred = CmpInst::getSwappedPredicate(Pred); 3766 if (NaNBehavior == SPNB_RETURNS_NAN) 3767 NaNBehavior = SPNB_RETURNS_OTHER; 3768 else if (NaNBehavior == SPNB_RETURNS_OTHER) 3769 NaNBehavior = SPNB_RETURNS_NAN; 3770 Ordered = !Ordered; 3771 } 3772 3773 // ([if]cmp X, Y) ? X : Y 3774 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 3775 switch (Pred) { 3776 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 3777 case ICmpInst::ICMP_UGT: 3778 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; 3779 case ICmpInst::ICMP_SGT: 3780 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; 3781 case ICmpInst::ICMP_ULT: 3782 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; 3783 case ICmpInst::ICMP_SLT: 3784 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; 3785 case FCmpInst::FCMP_UGT: 3786 case FCmpInst::FCMP_UGE: 3787 case FCmpInst::FCMP_OGT: 3788 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; 3789 case FCmpInst::FCMP_ULT: 3790 case FCmpInst::FCMP_ULE: 3791 case FCmpInst::FCMP_OLT: 3792 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; 3793 } 3794 } 3795 3796 if (ConstantInt *C1 = dyn_cast<ConstantInt>(CmpRHS)) { 3797 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) || 3798 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) { 3799 3800 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X 3801 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X 3802 if (Pred == ICmpInst::ICMP_SGT && (C1->isZero() || C1->isMinusOne())) { 3803 return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false}; 3804 } 3805 3806 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X 3807 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X 3808 if (Pred == ICmpInst::ICMP_SLT && (C1->isZero() || C1->isOne())) { 3809 return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false}; 3810 } 3811 } 3812 3813 // Y >s C ? ~Y : ~C == ~Y <s ~C ? ~Y : ~C = SMIN(~Y, ~C) 3814 if (const auto *C2 = dyn_cast<ConstantInt>(FalseVal)) { 3815 if (Pred == ICmpInst::ICMP_SGT && C1->getType() == C2->getType() && 3816 ~C1->getValue() == C2->getValue() && 3817 (match(TrueVal, m_Not(m_Specific(CmpLHS))) || 3818 match(CmpLHS, m_Not(m_Specific(TrueVal))))) { 3819 LHS = TrueVal; 3820 RHS = FalseVal; 3821 return {SPF_SMIN, SPNB_NA, false}; 3822 } 3823 } 3824 } 3825 3826 // TODO: (X > 4) ? X : 5 --> (X >= 5) ? 
X : 5 --> MAX(X, 5) 3827 3828 return {SPF_UNKNOWN, SPNB_NA, false}; 3829 } 3830 3831 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, 3832 Instruction::CastOps *CastOp) { 3833 CastInst *CI = dyn_cast<CastInst>(V1); 3834 Constant *C = dyn_cast<Constant>(V2); 3835 if (!CI) 3836 return nullptr; 3837 *CastOp = CI->getOpcode(); 3838 3839 if (auto *CI2 = dyn_cast<CastInst>(V2)) { 3840 // If V1 and V2 are both the same cast from the same type, we can look 3841 // through V1. 3842 if (CI2->getOpcode() == CI->getOpcode() && 3843 CI2->getSrcTy() == CI->getSrcTy()) 3844 return CI2->getOperand(0); 3845 return nullptr; 3846 } else if (!C) { 3847 return nullptr; 3848 } 3849 3850 Constant *CastedTo = nullptr; 3851 3852 if (isa<ZExtInst>(CI) && CmpI->isUnsigned()) 3853 CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy()); 3854 3855 if (isa<SExtInst>(CI) && CmpI->isSigned()) 3856 CastedTo = ConstantExpr::getTrunc(C, CI->getSrcTy(), true); 3857 3858 if (isa<TruncInst>(CI)) 3859 CastedTo = ConstantExpr::getIntegerCast(C, CI->getSrcTy(), CmpI->isSigned()); 3860 3861 if (isa<FPTruncInst>(CI)) 3862 CastedTo = ConstantExpr::getFPExtend(C, CI->getSrcTy(), true); 3863 3864 if (isa<FPExtInst>(CI)) 3865 CastedTo = ConstantExpr::getFPTrunc(C, CI->getSrcTy(), true); 3866 3867 if (isa<FPToUIInst>(CI)) 3868 CastedTo = ConstantExpr::getUIToFP(C, CI->getSrcTy(), true); 3869 3870 if (isa<FPToSIInst>(CI)) 3871 CastedTo = ConstantExpr::getSIToFP(C, CI->getSrcTy(), true); 3872 3873 if (isa<UIToFPInst>(CI)) 3874 CastedTo = ConstantExpr::getFPToUI(C, CI->getSrcTy(), true); 3875 3876 if (isa<SIToFPInst>(CI)) 3877 CastedTo = ConstantExpr::getFPToSI(C, CI->getSrcTy(), true); 3878 3879 if (!CastedTo) 3880 return nullptr; 3881 3882 Constant *CastedBack = 3883 ConstantExpr::getCast(CI->getOpcode(), CastedTo, C->getType(), true); 3884 // Make sure the cast doesn't lose any information. 3885 if (CastedBack != C) 3886 return nullptr; 3887 3888 return CastedTo; 3889 } 3890 3891 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, 3892 Instruction::CastOps *CastOp) { 3893 SelectInst *SI = dyn_cast<SelectInst>(V); 3894 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false}; 3895 3896 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); 3897 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false}; 3898 3899 CmpInst::Predicate Pred = CmpI->getPredicate(); 3900 Value *CmpLHS = CmpI->getOperand(0); 3901 Value *CmpRHS = CmpI->getOperand(1); 3902 Value *TrueVal = SI->getTrueValue(); 3903 Value *FalseVal = SI->getFalseValue(); 3904 FastMathFlags FMF; 3905 if (isa<FPMathOperator>(CmpI)) 3906 FMF = CmpI->getFastMathFlags(); 3907 3908 // Bail out early. 3909 if (CmpI->isEquality()) 3910 return {SPF_UNKNOWN, SPNB_NA, false}; 3911 3912 // Deal with type mismatches. 
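  // For example (hypothetical IR; the names are illustrative):
  //   %c = icmp sgt i32 %x, 5
  //   %e = sext i32 %x to i64
  //   %s = select i1 %c, i64 %e, i64 5
  // The compare is on i32 while the select is on i64. lookThroughCast strips
  // the sext and re-truncates the i64 constant, so the recursive call can
  // match smax(%x, 5) in i32, with *CastOp recording the sext for the caller.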
3913 if (CastOp && CmpLHS->getType() != TrueVal->getType()) { 3914 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) 3915 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 3916 cast<CastInst>(TrueVal)->getOperand(0), C, 3917 LHS, RHS); 3918 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) 3919 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 3920 C, cast<CastInst>(FalseVal)->getOperand(0), 3921 LHS, RHS); 3922 } 3923 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, 3924 LHS, RHS); 3925 } 3926 3927 ConstantRange llvm::getConstantRangeFromMetadata(MDNode &Ranges) { 3928 const unsigned NumRanges = Ranges.getNumOperands() / 2; 3929 assert(NumRanges >= 1 && "Must have at least one range!"); 3930 assert(Ranges.getNumOperands() % 2 == 0 && "Must be a sequence of pairs"); 3931 3932 auto *FirstLow = mdconst::extract<ConstantInt>(Ranges.getOperand(0)); 3933 auto *FirstHigh = mdconst::extract<ConstantInt>(Ranges.getOperand(1)); 3934 3935 ConstantRange CR(FirstLow->getValue(), FirstHigh->getValue()); 3936 3937 for (unsigned i = 1; i < NumRanges; ++i) { 3938 auto *Low = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0)); 3939 auto *High = mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1)); 3940 3941 // Note: unionWith will potentially create a range that contains values not 3942 // contained in any of the original N ranges. 3943 CR = CR.unionWith(ConstantRange(Low->getValue(), High->getValue())); 3944 } 3945 3946 return CR; 3947 } 3948 3949 /// Return true if "icmp Pred LHS RHS" is always true. 3950 static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS, 3951 const DataLayout &DL, unsigned Depth, 3952 AssumptionCache *AC, const Instruction *CxtI, 3953 const DominatorTree *DT) { 3954 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!"); 3955 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS) 3956 return true; 3957 3958 switch (Pred) { 3959 default: 3960 return false; 3961 3962 case CmpInst::ICMP_SLE: { 3963 const APInt *C; 3964 3965 // LHS s<= LHS +_{nsw} C if C >= 0 3966 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C)))) 3967 return !C->isNegative(); 3968 return false; 3969 } 3970 3971 case CmpInst::ICMP_ULE: { 3972 const APInt *C; 3973 3974 // LHS u<= LHS +_{nuw} C for any C 3975 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C)))) 3976 return true; 3977 3978 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB) 3979 auto MatchNUWAddsToSameValue = [&](Value *A, Value *B, Value *&X, 3980 const APInt *&CA, const APInt *&CB) { 3981 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) && 3982 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB)))) 3983 return true; 3984 3985 // If X & C == 0 then (X | C) == X +_{nuw} C 3986 if (match(A, m_Or(m_Value(X), m_APInt(CA))) && 3987 match(B, m_Or(m_Specific(X), m_APInt(CB)))) { 3988 unsigned BitWidth = CA->getBitWidth(); 3989 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 3990 computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT); 3991 3992 if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB) 3993 return true; 3994 } 3995 3996 return false; 3997 }; 3998 3999 Value *X; 4000 const APInt *CLHS, *CRHS; 4001 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS)) 4002 return CLHS->ule(*CRHS); 4003 4004 return false; 4005 } 4006 } 4007 } 4008 4009 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred 4010 /// ALHS ARHS" is true. Otherwise, return None. 
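/// For example, "(x +nsw 1) s<= y" implies "x s<= y": isTruePredicate proves
/// x s<= (x +nsw 1) and y s<= y, and the SLE case below chains the two.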
4011 static Optional<bool> 4012 isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS, Value *ARHS, 4013 Value *BLHS, Value *BRHS, const DataLayout &DL, 4014 unsigned Depth, AssumptionCache *AC, 4015 const Instruction *CxtI, const DominatorTree *DT) { 4016 switch (Pred) { 4017 default: 4018 return None; 4019 4020 case CmpInst::ICMP_SLT: 4021 case CmpInst::ICMP_SLE: 4022 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI, 4023 DT) && 4024 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT)) 4025 return true; 4026 return None; 4027 4028 case CmpInst::ICMP_ULT: 4029 case CmpInst::ICMP_ULE: 4030 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI, 4031 DT) && 4032 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT)) 4033 return true; 4034 return None; 4035 } 4036 } 4037 4038 /// Return true if the operands of the two compares match. IsSwappedOps is true 4039 /// when the operands match, but are swapped. 4040 static bool isMatchingOps(Value *ALHS, Value *ARHS, Value *BLHS, Value *BRHS, 4041 bool &IsSwappedOps) { 4042 4043 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS); 4044 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS); 4045 return IsMatchingOps || IsSwappedOps; 4046 } 4047 4048 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is 4049 /// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS 4050 /// BRHS" is false. Otherwise, return None if we can't infer anything. 4051 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred, 4052 Value *ALHS, Value *ARHS, 4053 CmpInst::Predicate BPred, 4054 Value *BLHS, Value *BRHS, 4055 bool IsSwappedOps) { 4056 // Canonicalize the operands so they're matching. 4057 if (IsSwappedOps) { 4058 std::swap(BLHS, BRHS); 4059 BPred = ICmpInst::getSwappedPredicate(BPred); 4060 } 4061 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred)) 4062 return true; 4063 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred)) 4064 return false; 4065 4066 return None; 4067 } 4068 4069 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is 4070 /// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS 4071 /// C2" is false. Otherwise, return None if we can't infer anything. 4072 static Optional<bool> 4073 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, Value *ALHS, 4074 ConstantInt *C1, CmpInst::Predicate BPred, 4075 Value *BLHS, ConstantInt *C2) { 4076 assert(ALHS == BLHS && "LHS operands must match."); 4077 ConstantRange DomCR = 4078 ConstantRange::makeExactICmpRegion(APred, C1->getValue()); 4079 ConstantRange CR = 4080 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue()); 4081 ConstantRange Intersection = DomCR.intersectWith(CR); 4082 ConstantRange Difference = DomCR.difference(CR); 4083 if (Intersection.isEmptySet()) 4084 return false; 4085 if (Difference.isEmptySet()) 4086 return true; 4087 return None; 4088 } 4089 4090 Optional<bool> llvm::isImpliedCondition(Value *LHS, Value *RHS, 4091 const DataLayout &DL, bool InvertAPred, 4092 unsigned Depth, AssumptionCache *AC, 4093 const Instruction *CxtI, 4094 const DominatorTree *DT) { 4095 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example. 
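  // An end-to-end example (hypothetical IR; the names are illustrative):
  //   %a = icmp ult i32 %x, 10
  //   %b = icmp ult i32 %x, 20
  // Here %a implies %b, because the matching-LHS constant path below
  // intersects the exact region [0, 10) with the allowed region [0, 20) and
  // finds an empty difference.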
4096   if (LHS->getType() != RHS->getType())
4097     return None;
4098
4099   Type *OpTy = LHS->getType();
4100   assert(OpTy->getScalarType()->isIntegerTy(1));
4101
4102   // LHS ==> RHS by definition
4103   if (!InvertAPred && LHS == RHS)
4104     return true;
4105
4106   if (OpTy->isVectorTy())
4107     // TODO: extend the code below to handle vectors.
4108     return None;
4109   assert(OpTy->isIntegerTy(1) && "implied by above");
4110
4111   ICmpInst::Predicate APred, BPred;
4112   Value *ALHS, *ARHS;
4113   Value *BLHS, *BRHS;
4114
4115   if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) ||
4116       !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS))))
4117     return None;
4118
4119   if (InvertAPred)
4120     APred = CmpInst::getInversePredicate(APred);
4121
4122   // Can we infer anything when the two compares have matching operands?
4123   bool IsSwappedOps;
4124   if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4125     if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4126             APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4127       return Implication;
4128     // No amount of additional analysis will infer the second condition, so
4129     // early exit.
4130     return None;
4131   }
4132
4133   // Can we infer anything when the LHS operands match and the RHS operands are
4134   // constants (not necessarily matching)?
4135   if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4136     if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4137             APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4138             cast<ConstantInt>(BRHS)))
4139       return Implication;
4140     // No amount of additional analysis will infer the second condition, so
4141     // early exit.
4142     return None;
4143   }
4144
4145   if (APred == BPred)
4146     return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
4147                                  CxtI, DT);
4148
4149   return None;
4150 }
4151