//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

// This optimization is known to cause performance regressions in some cases,
// so keep it under a temporary flag for now.
static cl::opt<bool>
DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
                              cl::Hidden, cl::init(true));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;
  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic.
  /// The classic case of this is assume(x = y), which will attempt to
  /// determine bits in x from bits in y, which will attempt to determine bits
  /// in y from bits in x, etc. Regarding the mutual recursion, computeKnownBits
  /// can call isKnownNonZero, which calls computeKnownBits and
  /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
  /// on.
  std::array<const Value *, MaxDepth> Excluded;
  unsigned NumExcluded;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), NumExcluded(0) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};
} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE) {
  return ::computeKnownBits(V, Depth,
                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C =
                dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here; we should factor this so
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
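  //
  // As a small worked illustration of the carry propagation below (the
  // operands here are hypothetical, not from any particular caller): adding
  // an i8 LHS whose low nibble is known to be 0001 (high nibble unknown) to
  // an RHS fully known to be 0000'0011 gives
  // PossibleSumOne = 0000'0001 + 0000'0011 = 0000'0100; the carry into every
  // low-nibble position is then fully known, so the result's low nibble
  // becomes known 0100 while its high nibble stays unknown.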
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
  uint64_t CarryIn = 0;
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(Known2.Zero, Known2.One);
    CarryIn = 1;
  }

  APInt PossibleSumZero = ~LHSKnown.Zero + ~Known2.Zero + CarryIn;
  APInt PossibleSumOne = LHSKnown.One + Known2.One + CarryIn;

  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnown.Zero ^ Known2.Zero);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnown.One ^ Known2.One;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnownUnion = LHSKnown.Zero | LHSKnown.One;
  APInt RHSKnownUnion = Known2.Zero | Known2.One;
  APInt CarryKnownUnion = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnownUnion & RHSKnownUnion & CarryKnownUnion;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");

  // Compute known bits of the result.
  KnownOut.Zero = ~PossibleSumOne & Known;
  KnownOut.One = PossibleSumOne & Known;

  // Are we still trying to solve for the sign bit?
  if (!Known.isSignBitSet()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnown.isNonNegative() && Known2.isNonNegative())
        KnownOut.makeNonNegative();
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnown.isNegative() && Known2.isNegative())
        KnownOut.makeNegative();
    }
  }
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
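  //
  // For example (hypothetical operands): if one operand is known to be a
  // multiple of 4 (two trailing zero bits) and the other a multiple of 8
  // (three trailing zero bits), their product is a multiple of 32, so at
  // least five trailing zero bits are known in the result.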
  unsigned TrailZ = Known.countMinTrailingZeros() +
                    Known2.countMinTrailingZeros();
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                                Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  Known.resetAll();
  Known.Zero.setLowBits(TrailZ);
  Known.Zero.setHighBits(LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J) {
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
        }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We run it once for each value queried, resulting in a runtime of
    // ~O(#assumes * #values).
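    //
    // As a concrete (hypothetical) illustration of the patterns matched
    // below: assume(icmp eq (and i32 %v, 7), 0) matches the
    // "assume(v & b = a)" case; the mask's low three bits are known one and
    // the RHS is known zero, so the low three bits of %v become known zero.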
    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C->getZExtValue());
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C->getZExtValue());
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C->getZExtValue());
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C->getZExtValue());
      Known.One |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
                                                m_AShr(m_V, m_ConstantInt(C))),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.Zero << C->getZExtValue();
      Known.One |= RHSKnown.One << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
                                             m_LShr(m_V, m_ConstantInt(C)),
                                             m_AShr(m_V, m_ConstantInt(C)))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.One << C->getZExtValue();
      Known.One |= RHSKnown.Zero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug.
  // On the other hand, the program may have undefined behavior, or we might
  // have a bug in the compiler. We can't assert/crash, so clear out the known
  // bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE) {
      auto *CxtI = const_cast<Instruction *>(Q.CxtI);
      OptimizationRemarkAnalysis ORA("value-tracking", "BadAssumption", CxtI);
      Q.ORE->emit(ORA << "Detected conflicting code assumptions. Program may "
                         "have undefined behavior, or compiler may have "
                         "internal error.");
    }
  }
}

// Compute known bits from a shift operator, including those with a
// non-constant shift amount. Known is the output of this function. Known2 is a
// pre-allocated temporary with the same bit width as Known. KZF and KOF are
// operator-specific functors that, given the known-zero or known-one bits
// respectively, and a shift amount, compute the implied known-zero or
// known-one bits of the shift operator's result for that shift amount. The
// results from calling KZF and KOF are conservatively combined for all
// permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2, unsigned Depth,
    const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is undefined. Clear the known bits so that other code
    // can propagate the resulting undef.
    if ((Known.Zero & Known.One) != 0)
      Known.resetAll();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of
  // the LHS, the value could be undef, so we don't know anything about it.
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
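  // (For instance, with BitWidth == 32 only the low five bits of the shift
  // amount select a well-defined shift; if nothing is known about any of
  // those bits, the loop below cannot exclude any shift amount, and the only
  // remaining source of information is whether the amount is known nonzero.)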
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If there are no compatible shift amounts, then we've proven that the shift
  // amount must be >= the BitWidth, and the result is undefined. We could
  // return anything we'd like, but we need to make sure the sets of known bits
  // stay disjoint (it should be better for some other code to actually
  // propagate the undef than to pick a value here using known bits).
  if (Known.Zero.intersects(Known.One))
    Known.resetAll();
}

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 bits are known to be clear if zero in either the LHS
    // or the RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                       m_Value(Y))) ||
         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                       m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 bits are known to be set if set in either the LHS
    // or the RHS.
    Known.One |= Known2.One;
    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 bits are known to be set if set in exactly one of the
    // LHS and RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has the "nsw" flag, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (ushr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits are known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
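    //
    // For example (a hypothetical i32 case): if %x is known to be <u 256
    // (24 leading zero bits), then %x urem %y is also <u 256, whatever %y is,
    // so those 24 leading zero bits survive into the result.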
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle the case when the index is a vector zeroinitializer.
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
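          //
          // A classic (hypothetical) example: for
          //   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
          //   %i.next = add nsw i32 %i, 4
          // both the start value and the step have at least two trailing
          // zero bits, so %i is known to be a multiple of 4.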
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits.
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          if (DontImproveNonNegativePhiBits)
            break;

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If the initial value of the recurrence is non-negative and we
            // are adding a non-negative number with nsw, the result can only
            // be non-negative or poison, regardless of the number of times we
            // execute the add in the recurrence. If the initial value is
            // negative and we are adding a negative number with nsw, the
            // result can only be negative or poison. Similar arguments apply
            // to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value refers to ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound Known.One using the lower bound on the number
        // of bits that must be set (Known2.countMinPopulation()).
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      }
    }
    break;
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if the vector is
    // sign-extended, shifted, etc).
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              Known, Known2, Depth, Q);
          break;
        }
      }
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarType()->isPointerTy()) &&
         "Not integer or pointer type!");
  assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVectorTy() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         "V and Known should have same BitWidth");
  (void)BitWidth;

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known.One = *C;
    Known.Zero = ~Known.One;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers.
Take the intersection of 1531 // each element. 1532 Known.Zero.setAllBits(); Known.One.setAllBits(); 1533 APInt Elt(BitWidth, 0); 1534 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { 1535 Elt = CDS->getElementAsInteger(i); 1536 Known.Zero &= ~Elt; 1537 Known.One &= Elt; 1538 } 1539 return; 1540 } 1541 1542 if (const auto *CV = dyn_cast<ConstantVector>(V)) { 1543 // We know that CV must be a vector of integers. Take the intersection of 1544 // each element. 1545 Known.Zero.setAllBits(); Known.One.setAllBits(); 1546 APInt Elt(BitWidth, 0); 1547 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { 1548 Constant *Element = CV->getAggregateElement(i); 1549 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); 1550 if (!ElementCI) { 1551 Known.resetAll(); 1552 return; 1553 } 1554 Elt = ElementCI->getValue(); 1555 Known.Zero &= ~Elt; 1556 Known.One &= Elt; 1557 } 1558 return; 1559 } 1560 1561 // Start out not knowing anything. 1562 Known.resetAll(); 1563 1564 // We can't imply anything about undefs. 1565 if (isa<UndefValue>(V)) 1566 return; 1567 1568 // There's no point in looking through other users of ConstantData for 1569 // assumptions. Confirm that we've handled them all. 1570 assert(!isa<ConstantData>(V) && "Unhandled constant data!"); 1571 1572 // Limit search depth. 1573 // All recursive calls that increase depth must come after this. 1574 if (Depth == MaxDepth) 1575 return; 1576 1577 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has 1578 // the bits of its aliasee. 1579 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 1580 if (!GA->isInterposable()) 1581 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q); 1582 return; 1583 } 1584 1585 if (const Operator *I = dyn_cast<Operator>(V)) 1586 computeKnownBitsFromOperator(I, Known, Depth, Q); 1587 1588 // Aligned pointers have trailing zeros - refine Known.Zero set 1589 if (V->getType()->isPointerTy()) { 1590 unsigned Align = V->getPointerAlignment(Q.DL); 1591 if (Align) 1592 Known.Zero.setLowBits(countTrailingZeros(Align)); 1593 } 1594 1595 // computeKnownBitsFromAssume strictly refines Known. 1596 // Therefore, we run them after computeKnownBitsFromOperator. 1597 1598 // Check whether a nearby assume intrinsic can determine some known bits. 1599 computeKnownBitsFromAssume(V, Known, Depth, Q); 1600 1601 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 1602 } 1603 1604 /// Return true if the given value is known to have exactly one 1605 /// bit set when defined. For vectors return true if every element is known to 1606 /// be a power of two when defined. Supports values with integer or pointer 1607 /// types and vectors of integers. 1608 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, 1609 const Query &Q) { 1610 if (const Constant *C = dyn_cast<Constant>(V)) { 1611 if (C->isNullValue()) 1612 return OrZero; 1613 1614 const APInt *ConstIntOrConstSplatInt; 1615 if (match(C, m_APInt(ConstIntOrConstSplatInt))) 1616 return ConstIntOrConstSplatInt->isPowerOf2(); 1617 } 1618 1619 // 1 << X is clearly a power of two if the one is not shifted off the end. If 1620 // it is shifted off the end then the result is undefined. 1621 if (match(V, m_Shl(m_One(), m_Value()))) 1622 return true; 1623 1624 // (signmask) >>l X is clearly a power of two if the one is not shifted off 1625 // the bottom. If it is shifted off the bottom then the result is undefined. 
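  // E.g. (illustrative, for i8): lshr i8 -128, %x moves the single set bit
  // 0b10000000 down to 0b01000000, 0b00100000, ..., so exactly one bit stays
  // set unless the bit is shifted out entirely, and that case is undefined
  // anyway.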
1626 if (match(V, m_LShr(m_SignMask(), m_Value()))) 1627 return true; 1628 1629 // The remaining tests are all recursive, so bail out if we hit the limit. 1630 if (Depth++ == MaxDepth) 1631 return false; 1632 1633 Value *X = nullptr, *Y = nullptr; 1634 // A shift left or a logical shift right of a power of two is a power of two 1635 // or zero. 1636 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 1637 match(V, m_LShr(m_Value(X), m_Value())))) 1638 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 1639 1640 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 1641 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 1642 1643 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 1644 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 1645 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 1646 1647 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 1648 // A power of two and'd with anything is a power of two or zero. 1649 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 1650 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 1651 return true; 1652 // X & (-X) is always a power of two or zero. 1653 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 1654 return true; 1655 return false; 1656 } 1657 1658 // Adding a power-of-two or zero to the same power-of-two or zero yields 1659 // either the original power-of-two, a larger power-of-two or zero. 1660 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1661 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 1662 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) { 1663 if (match(X, m_And(m_Specific(Y), m_Value())) || 1664 match(X, m_And(m_Value(), m_Specific(Y)))) 1665 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 1666 return true; 1667 if (match(Y, m_And(m_Specific(X), m_Value())) || 1668 match(Y, m_And(m_Value(), m_Specific(X)))) 1669 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 1670 return true; 1671 1672 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 1673 KnownBits LHSBits(BitWidth); 1674 computeKnownBits(X, LHSBits, Depth, Q); 1675 1676 KnownBits RHSBits(BitWidth); 1677 computeKnownBits(Y, RHSBits, Depth, Q); 1678 // If i8 V is a power of two or zero: 1679 // ZeroBits: 1 1 1 0 1 1 1 1 1680 // ~ZeroBits: 0 0 0 1 0 0 0 0 1681 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) 1682 // If OrZero isn't set, we cannot give back a zero result. 1683 // Make sure either the LHS or RHS has a bit set. 1684 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) 1685 return true; 1686 } 1687 } 1688 1689 // An exact divide or right shift can only shift off zero bits, so the result 1690 // is a power of two only if the first operand is a power of two and not 1691 // copying a sign bit (sdiv int_min, 2). 1692 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 1693 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 1694 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 1695 Depth, Q); 1696 } 1697 1698 return false; 1699 } 1700 1701 /// \brief Test whether a GEP's result is known to be non-null. 1702 /// 1703 /// Uses properties inherent in a GEP to try to determine whether it is known 1704 /// to be non-null. 1705 /// 1706 /// Currently this routine does not support vector GEPs. 
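/// For example (illustrative IR), if %base is known non-null in address
/// space 0, then
///   %p = getelementptr inbounds i32, i32* %base, i64 %i
/// is also non-null, since an inbounds GEP cannot wrap a non-null pointer
/// around to the null address in address space zero.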
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
                              const Query &Q) {
  if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
    return true;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = Q.DL.getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't want
    // to recurse 10k times just because we have 10k GEP operands. We don't
    // bail completely out because we want to handle constant GEPs regardless
    // of depth.
    if (Depth++ >= MaxDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
      return true;
  }

  return false;
}

/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value?
static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
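/// E.g. (illustrative IR): for %x = or i32 %a, 1 the low bit of %x is known
/// one, so this returns true via the computed known bits.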
1787 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) { 1788 if (auto *C = dyn_cast<Constant>(V)) { 1789 if (C->isNullValue()) 1790 return false; 1791 if (isa<ConstantInt>(C)) 1792 // Must be non-zero due to null test above. 1793 return true; 1794 1795 // For constant vectors, check that all elements are undefined or known 1796 // non-zero to determine that the whole vector is known non-zero. 1797 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) { 1798 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { 1799 Constant *Elt = C->getAggregateElement(i); 1800 if (!Elt || Elt->isNullValue()) 1801 return false; 1802 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt)) 1803 return false; 1804 } 1805 return true; 1806 } 1807 1808 return false; 1809 } 1810 1811 if (auto *I = dyn_cast<Instruction>(V)) { 1812 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) { 1813 // If the possible ranges don't contain zero, then the value is 1814 // definitely non-zero. 1815 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { 1816 const APInt ZeroValue(Ty->getBitWidth(), 0); 1817 if (rangeMetadataExcludesValue(Ranges, ZeroValue)) 1818 return true; 1819 } 1820 } 1821 } 1822 1823 // The remaining tests are all recursive, so bail out if we hit the limit. 1824 if (Depth++ >= MaxDepth) 1825 return false; 1826 1827 // Check for pointer simplifications. 1828 if (V->getType()->isPointerTy()) { 1829 if (isKnownNonNullAt(V, Q.CxtI, Q.DT)) 1830 return true; 1831 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 1832 if (isGEPKnownNonNull(GEP, Depth, Q)) 1833 return true; 1834 } 1835 1836 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); 1837 1838 // X | Y != 0 if X != 0 or Y != 0. 1839 Value *X = nullptr, *Y = nullptr; 1840 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 1841 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q); 1842 1843 // ext X != 0 if X != 0. 1844 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 1845 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); 1846 1847 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 1848 // if the lowest bit is shifted off the end. 1849 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) { 1850 // shl nuw can't remove any non-zero bits. 1851 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 1852 if (BO->hasNoUnsignedWrap()) 1853 return isKnownNonZero(X, Depth, Q); 1854 1855 KnownBits Known(BitWidth); 1856 computeKnownBits(X, Known, Depth, Q); 1857 if (Known.One[0]) 1858 return true; 1859 } 1860 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 1861 // defined if the sign bit is shifted off the end. 1862 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 1863 // shr exact can only shift out zero bits. 1864 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 1865 if (BO->isExact()) 1866 return isKnownNonZero(X, Depth, Q); 1867 1868 KnownBits Known = computeKnownBits(X, Depth, Q); 1869 if (Known.isNegative()) 1870 return true; 1871 1872 // If the shifter operand is a constant, and all of the bits shifted 1873 // out are known to be zero, and X is known non-zero then at least one 1874 // non-zero bit must remain. 1875 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 1876 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 1877 // Is there a known one in the portion not shifted out? 
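      // (E.g. with BitWidth == 8 and ShiftVal == 2, bits [2,8) survive the
      // shift; countMaxLeadingZeros() < 6 then means some bit in [2,8) is
      // known one, so the shifted result keeps a set bit.)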
      if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
        return true;
      // Are all the bits to be shifted out known zero?
      if (Known.countMinTrailingZeros() >= ShiftVal)
        return isKnownNonZero(X, Depth, Q);
    }
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, Depth, Q);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    KnownBits XKnown = computeKnownBits(X, Depth, Q);
    KnownBits YKnown = computeKnownBits(Y, Depth, Q);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnown.isNonNegative() && YKnown.isNonNegative())
      if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (XKnown.isNegative() && YKnown.isNegative()) {
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set. If some other bit is set then X is not
      // equal to INT_MIN.
      if (XKnown.One.intersects(Mask))
        return true;
      // The sign bit of Y is set. If some other bit is set then Y is not
      // equal to INT_MIN.
      if (YKnown.One.intersects(Mask))
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
      return true;
    if (YKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
        isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
        isKnownNonZero(SI->getFalseValue(), Depth, Q))
      return true;
  }
  // PHI
  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    // Try to detect a recurrence that monotonically increases from a
    // starting value, as these are common as induction variables.
    if (PN->getNumIncomingValues() == 2) {
      Value *Start = PN->getIncomingValue(0);
      Value *Induction = PN->getIncomingValue(1);
      if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
        std::swap(Start, Induction);
      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
        if (!C->isZero() && !C->isNegative()) {
          ConstantInt *X;
          if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
               match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
              !X->isNegative())
            return true;
        }
      }
    }
    // Check whether all incoming values are non-zero constants.
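    // E.g. a phi of (i32 1, i32 7) is non-zero whichever predecessor control
    // flow came from.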
1957 bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) { 1958 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue(); 1959 }); 1960 if (AllNonZeroConstants) 1961 return true; 1962 } 1963 1964 KnownBits Known(BitWidth); 1965 computeKnownBits(V, Known, Depth, Q); 1966 return Known.One != 0; 1967 } 1968 1969 /// Return true if V2 == V1 + X, where X is known non-zero. 1970 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { 1971 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 1972 if (!BO || BO->getOpcode() != Instruction::Add) 1973 return false; 1974 Value *Op = nullptr; 1975 if (V2 == BO->getOperand(0)) 1976 Op = BO->getOperand(1); 1977 else if (V2 == BO->getOperand(1)) 1978 Op = BO->getOperand(0); 1979 else 1980 return false; 1981 return isKnownNonZero(Op, 0, Q); 1982 } 1983 1984 /// Return true if it is known that V1 != V2. 1985 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) { 1986 if (V1 == V2) 1987 return false; 1988 if (V1->getType() != V2->getType()) 1989 // We can't look through casts yet. 1990 return false; 1991 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q)) 1992 return true; 1993 1994 if (V1->getType()->isIntOrIntVectorTy()) { 1995 // Are any known bits in V1 contradictory to known bits in V2? If V1 1996 // has a known zero where V2 has a known one, they must not be equal. 1997 KnownBits Known1 = computeKnownBits(V1, 0, Q); 1998 KnownBits Known2 = computeKnownBits(V2, 0, Q); 1999 2000 if (Known1.Zero.intersects(Known2.One) || 2001 Known2.Zero.intersects(Known1.One)) 2002 return true; 2003 } 2004 return false; 2005 } 2006 2007 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 2008 /// simplify operations downstream. Mask is known to be zero for bits that V 2009 /// cannot have. 2010 /// 2011 /// This function is defined on values with integer type, values with pointer 2012 /// type, and vectors of integers. In the case 2013 /// where V is a vector, the mask, known zero, and known one values are the 2014 /// same width as the vector element, and the bit is set only if it is true 2015 /// for all of the elements in the vector. 2016 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, 2017 const Query &Q) { 2018 KnownBits Known(Mask.getBitWidth()); 2019 computeKnownBits(V, Known, Depth, Q); 2020 return Mask.isSubsetOf(Known.Zero); 2021 } 2022 2023 /// For vector constants, loop over the elements and find the constant with the 2024 /// minimum number of sign bits. Return 0 if the value is not a vector constant 2025 /// or if any element was not analyzed; otherwise, return the count for the 2026 /// element with the minimum number of sign bits. 2027 static unsigned computeNumSignBitsVectorConstant(const Value *V, 2028 unsigned TyBits) { 2029 const auto *CV = dyn_cast<Constant>(V); 2030 if (!CV || !CV->getType()->isVectorTy()) 2031 return 0; 2032 2033 unsigned MinSignBits = TyBits; 2034 unsigned NumElts = CV->getType()->getVectorNumElements(); 2035 for (unsigned i = 0; i != NumElts; ++i) { 2036 // If we find a non-ConstantInt, bail out. 2037 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); 2038 if (!Elt) 2039 return 0; 2040 2041 // If the sign bit is 1, flip the bits, so we always count leading zeros. 
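    // E.g. for an i8 element -8 (0b11111000), the flipped value 0b00000111
    // has five leading zeros, matching the five replicated sign bits of -8.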
    APInt EltVal = Elt->getValue();
    if (EltVal.isNegative())
      EltVal = ~EltVal;
    MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros());
  }

  return MinSignBits;
}

static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
  assert(Result > 0 && "At least one sign bit needs to be present!");
  return Result;
}

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign bit
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits.
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q) {

  // We return the minimum number of sign bits that are guaranteed to be
  // present in V, so for undef we have to conservatively return 1. We don't
  // have the same behavior for poison though -- that's a FIXME today.

  unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general computeKnownBits case
  // below.

  if (Depth == MaxDepth)
    return 1;  // Limit search depth.

  const Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;

  case Instruction::SDiv: {
    const APInt *Denominator;
    // sdiv X, C -> adds log(C) sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits.
      unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Add floor(log(C)) bits to the numerator bits.
      return std::min(TyBits, NumBits + Denominator->logBase2());
    }
    break;
  }

  case Instruction::SRem: {
    const APInt *Denominator;
    // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant. This lets us put a lower bound on the number of sign
    // bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits. SRem by a positive constant
      // can't lower the number of sign bits.
      unsigned NumrBits =
          ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Calculate the leading sign bit constraints by examining the
      // denominator. Given that the denominator is positive, there are two
      // cases:
      //
      //  1. the numerator is positive. The result range is [0,C) and [0,C) u<
      //     (1 << ceilLogBase2(C)).
      //
      //  2. the numerator is negative.
Then the result range is (-C,0] and 2133 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). 2134 // 2135 // Thus a lower bound on the number of sign bits is `TyBits - 2136 // ceilLogBase2(C)`. 2137 2138 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); 2139 return std::max(NumrBits, ResBits); 2140 } 2141 break; 2142 } 2143 2144 case Instruction::AShr: { 2145 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2146 // ashr X, C -> adds C sign bits. Vectors too. 2147 const APInt *ShAmt; 2148 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2149 unsigned ShAmtLimited = ShAmt->getZExtValue(); 2150 if (ShAmtLimited >= TyBits) 2151 break; // Bad shift. 2152 Tmp += ShAmtLimited; 2153 if (Tmp > TyBits) Tmp = TyBits; 2154 } 2155 return Tmp; 2156 } 2157 case Instruction::Shl: { 2158 const APInt *ShAmt; 2159 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2160 // shl destroys sign bits. 2161 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2162 Tmp2 = ShAmt->getZExtValue(); 2163 if (Tmp2 >= TyBits || // Bad shift. 2164 Tmp2 >= Tmp) break; // Shifted all sign bits out. 2165 return Tmp - Tmp2; 2166 } 2167 break; 2168 } 2169 case Instruction::And: 2170 case Instruction::Or: 2171 case Instruction::Xor: // NOT is handled here. 2172 // Logical binary ops preserve the number of sign bits at the worst. 2173 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2174 if (Tmp != 1) { 2175 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2176 FirstAnswer = std::min(Tmp, Tmp2); 2177 // We computed what we know about the sign bits as our first 2178 // answer. Now proceed to the generic code that uses 2179 // computeKnownBits, and pick whichever answer is better. 2180 } 2181 break; 2182 2183 case Instruction::Select: 2184 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2185 if (Tmp == 1) return 1; // Early out. 2186 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 2187 return std::min(Tmp, Tmp2); 2188 2189 case Instruction::Add: 2190 // Add can have at most one carry bit. Thus we know that the output 2191 // is, at worst, one more bit than the inputs. 2192 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2193 if (Tmp == 1) return 1; // Early out. 2194 2195 // Special case decrementing a value (ADD X, -1): 2196 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 2197 if (CRHS->isAllOnesValue()) { 2198 KnownBits Known(TyBits); 2199 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 2200 2201 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2202 // sign bits set. 2203 if ((Known.Zero | 1).isAllOnesValue()) 2204 return TyBits; 2205 2206 // If we are subtracting one from a positive number, there is no carry 2207 // out of the result. 2208 if (Known.isNonNegative()) 2209 return Tmp; 2210 } 2211 2212 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2213 if (Tmp2 == 1) return 1; 2214 return std::min(Tmp, Tmp2)-1; 2215 2216 case Instruction::Sub: 2217 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2218 if (Tmp2 == 1) return 1; 2219 2220 // Handle NEG. 2221 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2222 if (CLHS->isNullValue()) { 2223 KnownBits Known(TyBits); 2224 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 2225 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2226 // sign bits set. 
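        // ((Known.Zero | 1) is all-ones exactly when every bit above bit 0 is
        // known zero, i.e. the value is known to be 0 or 1.)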
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (Known.isNonNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;

  case Instruction::PHI: {
    const PHINode *PN = cast<PHINode>(U);
    unsigned NumIncomingValues = PN->getNumIncomingValues();
    // Don't analyze large in-degree PHIs.
    if (NumIncomingValues > 4) break;
    // Unreachable blocks may have zero-operand PHI nodes.
    if (NumIncomingValues == 0) break;

    // Take the minimum of all incoming values. This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
    for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(
          Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;

  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and
    // skip tracking the specific element. But at least we might find
    // information valid for all elements of the vector (for example if the
    // vector is sign extended, shifted, etc).
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.

  // If we can examine all elements of a vector constant successfully, we're
  // done (we can't do any better than that). If not, keep trying.
  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
    return VecSignBits;

  KnownBits Known(TyBits);
  computeKnownBits(V, Known, Depth, Q);

  // If we know that the sign bit is either zero or one, determine the number
  // of identical bits in the top of the input value.
  return std::max(FirstAnswer, Known.countMinSignBits());
}

/// This function computes the integer multiple of Base that equals V. If
/// successful, it returns true and stores the multiple in Multiple. If
/// unsuccessful, it returns false. It looks through SExt instructions only if
/// LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    // Multiple is 1.
2320 Multiple = ConstantInt::get(T, 1); 2321 return true; 2322 } 2323 2324 if (CI && CI->getZExtValue() % Base == 0) { 2325 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 2326 return true; 2327 } 2328 2329 if (Depth == MaxDepth) return false; // Limit search depth. 2330 2331 Operator *I = dyn_cast<Operator>(V); 2332 if (!I) return false; 2333 2334 switch (I->getOpcode()) { 2335 default: break; 2336 case Instruction::SExt: 2337 if (!LookThroughSExt) return false; 2338 // otherwise fall through to ZExt 2339 LLVM_FALLTHROUGH; 2340 case Instruction::ZExt: 2341 return ComputeMultiple(I->getOperand(0), Base, Multiple, 2342 LookThroughSExt, Depth+1); 2343 case Instruction::Shl: 2344 case Instruction::Mul: { 2345 Value *Op0 = I->getOperand(0); 2346 Value *Op1 = I->getOperand(1); 2347 2348 if (I->getOpcode() == Instruction::Shl) { 2349 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 2350 if (!Op1CI) return false; 2351 // Turn Op0 << Op1 into Op0 * 2^Op1 2352 APInt Op1Int = Op1CI->getValue(); 2353 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 2354 APInt API(Op1Int.getBitWidth(), 0); 2355 API.setBit(BitToSet); 2356 Op1 = ConstantInt::get(V->getContext(), API); 2357 } 2358 2359 Value *Mul0 = nullptr; 2360 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 2361 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 2362 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 2363 if (Op1C->getType()->getPrimitiveSizeInBits() < 2364 MulC->getType()->getPrimitiveSizeInBits()) 2365 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 2366 if (Op1C->getType()->getPrimitiveSizeInBits() > 2367 MulC->getType()->getPrimitiveSizeInBits()) 2368 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 2369 2370 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 2371 Multiple = ConstantExpr::getMul(MulC, Op1C); 2372 return true; 2373 } 2374 2375 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 2376 if (Mul0CI->getValue() == 1) { 2377 // V == Base * Op1, so return Op1 2378 Multiple = Op1; 2379 return true; 2380 } 2381 } 2382 2383 Value *Mul1 = nullptr; 2384 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 2385 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 2386 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 2387 if (Op0C->getType()->getPrimitiveSizeInBits() < 2388 MulC->getType()->getPrimitiveSizeInBits()) 2389 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 2390 if (Op0C->getType()->getPrimitiveSizeInBits() > 2391 MulC->getType()->getPrimitiveSizeInBits()) 2392 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 2393 2394 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 2395 Multiple = ConstantExpr::getMul(MulC, Op0C); 2396 return true; 2397 } 2398 2399 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) 2400 if (Mul1CI->getValue() == 1) { 2401 // V == Base * Op0, so return Op0 2402 Multiple = Op0; 2403 return true; 2404 } 2405 } 2406 } 2407 } 2408 2409 // We could not determine if V is a multiple of Base. 
  return false;
}

Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
                                            const TargetLibraryInfo *TLI) {
  const Function *F = ICS.getCalledFunction();
  if (!F)
    return Intrinsic::not_intrinsic;

  if (F->isIntrinsic())
    return F->getIntrinsicID();

  if (!TLI)
    return Intrinsic::not_intrinsic;

  LibFunc Func;
  // We're going to make assumptions about the semantics of these functions, so
  // check that the target knows the function is available in this environment
  // and that it does not have local linkage.
  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
    return Intrinsic::not_intrinsic;

  if (!ICS.onlyReadsMemory())
    return Intrinsic::not_intrinsic;

  // Otherwise check if we have a call to a function that can be turned into a
  // vector intrinsic.
  switch (Func) {
  default:
    break;
  case LibFunc_sin:
  case LibFunc_sinf:
  case LibFunc_sinl:
    return Intrinsic::sin;
  case LibFunc_cos:
  case LibFunc_cosf:
  case LibFunc_cosl:
    return Intrinsic::cos;
  case LibFunc_exp:
  case LibFunc_expf:
  case LibFunc_expl:
    return Intrinsic::exp;
  case LibFunc_exp2:
  case LibFunc_exp2f:
  case LibFunc_exp2l:
    return Intrinsic::exp2;
  case LibFunc_log:
  case LibFunc_logf:
  case LibFunc_logl:
    return Intrinsic::log;
  case LibFunc_log10:
  case LibFunc_log10f:
  case LibFunc_log10l:
    return Intrinsic::log10;
  case LibFunc_log2:
  case LibFunc_log2f:
  case LibFunc_log2l:
    return Intrinsic::log2;
  case LibFunc_fabs:
  case LibFunc_fabsf:
  case LibFunc_fabsl:
    return Intrinsic::fabs;
  case LibFunc_fmin:
  case LibFunc_fminf:
  case LibFunc_fminl:
    return Intrinsic::minnum;
  case LibFunc_fmax:
  case LibFunc_fmaxf:
  case LibFunc_fmaxl:
    return Intrinsic::maxnum;
  case LibFunc_copysign:
  case LibFunc_copysignf:
  case LibFunc_copysignl:
    return Intrinsic::copysign;
  case LibFunc_floor:
  case LibFunc_floorf:
  case LibFunc_floorl:
    return Intrinsic::floor;
  case LibFunc_ceil:
  case LibFunc_ceilf:
  case LibFunc_ceill:
    return Intrinsic::ceil;
  case LibFunc_trunc:
  case LibFunc_truncf:
  case LibFunc_truncl:
    return Intrinsic::trunc;
  case LibFunc_rint:
  case LibFunc_rintf:
  case LibFunc_rintl:
    return Intrinsic::rint;
  case LibFunc_nearbyint:
  case LibFunc_nearbyintf:
  case LibFunc_nearbyintl:
    return Intrinsic::nearbyint;
  case LibFunc_round:
  case LibFunc_roundf:
  case LibFunc_roundl:
    return Intrinsic::round;
  case LibFunc_pow:
  case LibFunc_powf:
  case LibFunc_powl:
    return Intrinsic::pow;
  case LibFunc_sqrt:
  case LibFunc_sqrtf:
  case LibFunc_sqrtl:
    if (ICS->hasNoNaNs())
      return Intrinsic::sqrt;
    return Intrinsic::not_intrinsic;
  }

  return Intrinsic::not_intrinsic;
}

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
///
bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                                unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  if (Depth == MaxDepth)
    return false;  // Limit search depth.
2536 2537 const Operator *I = dyn_cast<Operator>(V); 2538 if (!I) return false; 2539 2540 // Check if the nsz fast-math flag is set 2541 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I)) 2542 if (FPO->hasNoSignedZeros()) 2543 return true; 2544 2545 // (add x, 0.0) is guaranteed to return +0.0, not -0.0. 2546 if (I->getOpcode() == Instruction::FAdd) 2547 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1))) 2548 if (CFP->isNullValue()) 2549 return true; 2550 2551 // sitofp and uitofp turn into +0.0 for zero. 2552 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I)) 2553 return true; 2554 2555 if (const CallInst *CI = dyn_cast<CallInst>(I)) { 2556 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI); 2557 switch (IID) { 2558 default: 2559 break; 2560 // sqrt(-0.0) = -0.0, no other negative results are possible. 2561 case Intrinsic::sqrt: 2562 return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1); 2563 // fabs(x) != -0.0 2564 case Intrinsic::fabs: 2565 return true; 2566 } 2567 } 2568 2569 return false; 2570 } 2571 2572 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a 2573 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign 2574 /// bit despite comparing equal. 2575 static bool cannotBeOrderedLessThanZeroImpl(const Value *V, 2576 const TargetLibraryInfo *TLI, 2577 bool SignBitOnly, 2578 unsigned Depth) { 2579 // TODO: This function does not do the right thing when SignBitOnly is true 2580 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform 2581 // which flips the sign bits of NaNs. See 2582 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2583 2584 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 2585 return !CFP->getValueAPF().isNegative() || 2586 (!SignBitOnly && CFP->getValueAPF().isZero()); 2587 } 2588 2589 if (Depth == MaxDepth) 2590 return false; // Limit search depth. 2591 2592 const Operator *I = dyn_cast<Operator>(V); 2593 if (!I) 2594 return false; 2595 2596 switch (I->getOpcode()) { 2597 default: 2598 break; 2599 // Unsigned integers are always nonnegative. 2600 case Instruction::UIToFP: 2601 return true; 2602 case Instruction::FMul: 2603 // x*x is always non-negative or a NaN. 2604 if (I->getOperand(0) == I->getOperand(1) && 2605 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 2606 return true; 2607 2608 LLVM_FALLTHROUGH; 2609 case Instruction::FAdd: 2610 case Instruction::FDiv: 2611 case Instruction::FRem: 2612 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2613 Depth + 1) && 2614 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2615 Depth + 1); 2616 case Instruction::Select: 2617 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2618 Depth + 1) && 2619 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2620 Depth + 1); 2621 case Instruction::FPExt: 2622 case Instruction::FPTrunc: 2623 // Widening/narrowing never change sign. 
2624 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2625 Depth + 1); 2626 case Instruction::Call: 2627 const auto *CI = cast<CallInst>(I); 2628 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI); 2629 switch (IID) { 2630 default: 2631 break; 2632 case Intrinsic::maxnum: 2633 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2634 Depth + 1) || 2635 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2636 Depth + 1); 2637 case Intrinsic::minnum: 2638 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2639 Depth + 1) && 2640 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2641 Depth + 1); 2642 case Intrinsic::exp: 2643 case Intrinsic::exp2: 2644 case Intrinsic::fabs: 2645 return true; 2646 2647 case Intrinsic::sqrt: 2648 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 2649 if (!SignBitOnly) 2650 return true; 2651 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 2652 CannotBeNegativeZero(CI->getOperand(0), TLI)); 2653 2654 case Intrinsic::powi: 2655 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 2656 // powi(x,n) is non-negative if n is even. 2657 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 2658 return true; 2659 } 2660 // TODO: This is not correct. Given that exp is an integer, here are the 2661 // ways that pow can return a negative value: 2662 // 2663 // pow(x, exp) --> negative if exp is odd and x is negative. 2664 // pow(-0, exp) --> -inf if exp is negative odd. 2665 // pow(-0, exp) --> -0 if exp is positive odd. 2666 // pow(-inf, exp) --> -0 if exp is negative odd. 2667 // pow(-inf, exp) --> -inf if exp is positive odd. 2668 // 2669 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 2670 // but we must return false if x == -0. Unfortunately we do not currently 2671 // have a way of expressing this constraint. See details in 2672 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2673 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2674 Depth + 1); 2675 2676 case Intrinsic::fma: 2677 case Intrinsic::fmuladd: 2678 // x*x+y is non-negative if y is non-negative. 2679 return I->getOperand(0) == I->getOperand(1) && 2680 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && 2681 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2682 Depth + 1); 2683 } 2684 break; 2685 } 2686 return false; 2687 } 2688 2689 bool llvm::CannotBeOrderedLessThanZero(const Value *V, 2690 const TargetLibraryInfo *TLI) { 2691 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); 2692 } 2693 2694 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { 2695 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); 2696 } 2697 2698 /// If the specified value can be set by repeating the same byte in memory, 2699 /// return the i8 value that it is represented with. This is 2700 /// true for all i8 values obviously, but is also true for i32 0, i32 -1, 2701 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated 2702 /// byte store (e.g. i16 0x1234), return null. 2703 Value *llvm::isBytewiseValue(Value *V) { 2704 // All byte-wide stores are splatable, even of arbitrary variables. 2705 if (V->getType()->isIntegerTy(8)) return V; 2706 2707 // Handle 'null' ConstantArrayZero etc. 
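  // (E.g. i32 0, double 0.0 and [4 x i32] zeroinitializer all splat to an
  // i8 0 byte.)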
2708 if (Constant *C = dyn_cast<Constant>(V)) 2709 if (C->isNullValue()) 2710 return Constant::getNullValue(Type::getInt8Ty(V->getContext())); 2711 2712 // Constant float and double values can be handled as integer values if the 2713 // corresponding integer value is "byteable". An important case is 0.0. 2714 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 2715 if (CFP->getType()->isFloatTy()) 2716 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext())); 2717 if (CFP->getType()->isDoubleTy()) 2718 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext())); 2719 // Don't handle long double formats, which have strange constraints. 2720 } 2721 2722 // We can handle constant integers that are multiple of 8 bits. 2723 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 2724 if (CI->getBitWidth() % 8 == 0) { 2725 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); 2726 2727 if (!CI->getValue().isSplat(8)) 2728 return nullptr; 2729 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8)); 2730 } 2731 } 2732 2733 // A ConstantDataArray/Vector is splatable if all its members are equal and 2734 // also splatable. 2735 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) { 2736 Value *Elt = CA->getElementAsConstant(0); 2737 Value *Val = isBytewiseValue(Elt); 2738 if (!Val) 2739 return nullptr; 2740 2741 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I) 2742 if (CA->getElementAsConstant(I) != Elt) 2743 return nullptr; 2744 2745 return Val; 2746 } 2747 2748 // Conceptually, we could handle things like: 2749 // %a = zext i8 %X to i16 2750 // %b = shl i16 %a, 8 2751 // %c = or i16 %a, %b 2752 // but until there is an example that actually needs this, it doesn't seem 2753 // worth worrying about. 2754 return nullptr; 2755 } 2756 2757 2758 // This is the recursive version of BuildSubAggregate. It takes a few different 2759 // arguments. Idxs is the index within the nested struct From that we are 2760 // looking at now (which is of type IndexedType). IdxSkip is the number of 2761 // indices from Idxs that should be left out when inserting into the resulting 2762 // struct. To is the result struct built so far, new insertvalue instructions 2763 // build on that. 2764 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, 2765 SmallVectorImpl<unsigned> &Idxs, 2766 unsigned IdxSkip, 2767 Instruction *InsertBefore) { 2768 llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType); 2769 if (STy) { 2770 // Save the original To argument so we can modify it 2771 Value *OrigTo = To; 2772 // General case, the type indexed by Idxs is a struct 2773 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2774 // Process each struct element recursively 2775 Idxs.push_back(i); 2776 Value *PrevTo = To; 2777 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, 2778 InsertBefore); 2779 Idxs.pop_back(); 2780 if (!To) { 2781 // Couldn't find any inserted value for this index? Cleanup 2782 while (PrevTo != OrigTo) { 2783 InsertValueInst* Del = cast<InsertValueInst>(PrevTo); 2784 PrevTo = Del->getAggregateOperand(); 2785 Del->eraseFromParent(); 2786 } 2787 // Stop processing elements 2788 break; 2789 } 2790 } 2791 // If we successfully found a value for each of our subaggregates 2792 if (To) 2793 return To; 2794 } 2795 // Base case, the type indexed by SourceIdxs is not a struct, or not all of 2796 // the struct's elements had a value that was inserted directly. 
  // In the latter case, perhaps we can't determine each of the subelements
  // individually, but we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot
  Value *V = FindInsertedValue(From, Idxs);

  if (!V)
    return nullptr;

  // Insert the value in the new (sub) aggregate
  return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
                                       "tmp", InsertBefore);
}

// This helper takes a nested struct and extracts a part of it (which is again
// a struct) into a new value. For example, given the struct:
//   { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
//   { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct are known (i.e., inserted into From by
// an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (!C) return nullptr;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }

  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop the indices for the insertvalue instruction in parallel with the
    // requested indices
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues
        if (!InsertBefore)
          return nullptr;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially. For example:
        //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue {i32, { i32, i32 } } %B, 1
        // This can be changed into
        //   %A = insertvalue {i32, i32 } undef, i32 10, 0
        //   %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (e.g. when extracting from a function return
  // value or a load instruction).
  return nullptr;
}

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                              const DataLayout &DL) {
  unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
  APInt ByteOffset(BitWidth, 0);

  // We walk up the defs but use a visited set to handle unreachable code. In
  // that case, we stop after accumulating the cycle once (not that it
  // matters).
  SmallPtrSet<Value *, 16> Visited;
  while (Visited.insert(Ptr).second) {
    if (Ptr->getType()->isVectorTy())
      break;

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // pointer type.
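      // E.g. (illustrative) if the walk passed through an addrspacecast from
      // a 64-bit address space into a 32-bit one, the offset must now be
      // accumulated at the 32-bit width of this GEP's pointer type.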
      APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;

      ByteOffset += GEPOffset.getSExtValue();

      Ptr = GEP->getPointerOperand();
    } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
               Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->isInterposable())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
  }
  Offset = ByteOffset.getSExtValue();
  return Ptr;
}

bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                       unsigned CharSize) {
  // Make sure the GEP has exactly three arguments.
  if (GEP->getNumOperands() != 3)
    return false;

  // Make sure the index-ee is a pointer to an array of \p CharSize integers.
  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
    return false;

  // Check to make sure that the first operand of the GEP is an integer and
  // has value 0 so that we are sure we're indexing into the initializer.
  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!FirstIdx || !FirstIdx->isZero())
    return false;

  return true;
}

bool llvm::getConstantDataArrayInfo(const Value *V,
                                    ConstantDataArraySlice &Slice,
                                    unsigned ElementSize, uint64_t Offset) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // The GEP operator should be based on a pointer to a string constant, and
    // must be indexing into that string constant.
    if (!isGEPBasedOnPointerToString(GEP, ElementSize))
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
                                    StartIdx + Offset);
  }

  // The GEP, whether a constant expression or an instruction, must reference
  // a global variable that is a constant and is initialized. The referenced
  // constant initializer is the array that we'll use for optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  const ConstantDataArray *Array;
  ArrayType *ArrayTy;
  if (GV->getInitializer()->isNullValue()) {
    Type *GVTy = GV->getValueType();
    if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
      // A zeroinitializer for the array; there is no ConstantDataArray.
3031 Array = nullptr;
3032 } else {
3033 const DataLayout &DL = GV->getParent()->getDataLayout();
3034 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3035 uint64_t Length = SizeInBytes / (ElementSize / 8);
3036 if (Length <= Offset)
3037 return false;
3038
3039 Slice.Array = nullptr;
3040 Slice.Offset = 0;
3041 Slice.Length = Length - Offset;
3042 return true;
3043 }
3044 } else {
3045 // This must be a ConstantDataArray.
3046 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3047 if (!Array)
3048 return false;
3049 ArrayTy = Array->getType();
3050 }
3051 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3052 return false;
3053
3054 uint64_t NumElts = ArrayTy->getArrayNumElements();
3055 if (Offset > NumElts)
3056 return false;
3057
3058 Slice.Array = Array;
3059 Slice.Offset = Offset;
3060 Slice.Length = NumElts - Offset;
3061 return true;
3062 }
3063
3064 /// This function extracts the bytes of the null-terminated C string pointed
3065 /// to by V. If successful, it returns true and returns the string in Str.
3066 /// If unsuccessful, it returns false.
3067 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3068 uint64_t Offset, bool TrimAtNul) {
3069 ConstantDataArraySlice Slice;
3070 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3071 return false;
3072
3073 if (Slice.Array == nullptr) {
3074 if (TrimAtNul) {
3075 Str = StringRef();
3076 return true;
3077 }
3078 if (Slice.Length == 1) {
3079 Str = StringRef("", 1);
3080 return true;
3081 }
3082 // We cannot instantiate a StringRef as we do not have an appropriate string
3083 // of 0s at hand.
3084 return false;
3085 }
3086
3087 // Start out with the entire array in the StringRef.
3088 Str = Slice.Array->getAsString();
3089 // Skip over 'offset' bytes.
3090 Str = Str.substr(Slice.Offset);
3091
3092 if (TrimAtNul) {
3093 // Trim off the \0 and anything after it. If the array is not nul
3094 // terminated, we just return the rest of the string. The client may know
3095 // some other way that the string is length-bound.
3096 Str = Str.substr(0, Str.find('\0'));
3097 }
3098 return true;
3099 }
3100
3101 // These next two are very similar to the above, but also look through PHI
3102 // nodes.
3103 // TODO: See if we can integrate these two together.
3104
3105 /// If we can compute the length of the string pointed to by
3106 /// the specified pointer, return 'len+1'. If we can't, return 0.
3107 static uint64_t GetStringLengthH(const Value *V,
3108 SmallPtrSetImpl<const PHINode*> &PHIs,
3109 unsigned CharSize) {
3110 // Look through noop bitcast instructions.
3111 V = V->stripPointerCasts();
3112
3113 // If this is a PHI node, there are two cases: either we have already seen it
3114 // or we haven't.
3115 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3116 if (!PHIs.insert(PN).second)
3117 return ~0ULL; // already in the set.
3118
3119 // If it was new, see if all the input strings are the same length.
3120 uint64_t LenSoFar = ~0ULL;
3121 for (Value *IncValue : PN->incoming_values()) {
3122 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3123 if (Len == 0) return 0; // Unknown length -> unknown.
3124
3125 if (Len == ~0ULL) continue;
3126
3127 if (Len != LenSoFar && LenSoFar != ~0ULL)
3128 return 0; // Disagree -> unknown.
3129 LenSoFar = Len;
3130 }
3131
3132 // Success, all agree.
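// (E.g., a phi over pointers into "foo" and "bar" returns 4 from here:
// both incoming strings have length 3, so every arm agrees on strlen + 1.)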
3133 return LenSoFar; 3134 } 3135 3136 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) 3137 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 3138 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize); 3139 if (Len1 == 0) return 0; 3140 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize); 3141 if (Len2 == 0) return 0; 3142 if (Len1 == ~0ULL) return Len2; 3143 if (Len2 == ~0ULL) return Len1; 3144 if (Len1 != Len2) return 0; 3145 return Len1; 3146 } 3147 3148 // Otherwise, see if we can read the string. 3149 ConstantDataArraySlice Slice; 3150 if (!getConstantDataArrayInfo(V, Slice, CharSize)) 3151 return 0; 3152 3153 if (Slice.Array == nullptr) 3154 return 1; 3155 3156 // Search for nul characters 3157 unsigned NullIndex = 0; 3158 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) { 3159 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0) 3160 break; 3161 } 3162 3163 return NullIndex + 1; 3164 } 3165 3166 /// If we can compute the length of the string pointed to by 3167 /// the specified pointer, return 'len+1'. If we can't, return 0. 3168 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) { 3169 if (!V->getType()->isPointerTy()) return 0; 3170 3171 SmallPtrSet<const PHINode*, 32> PHIs; 3172 uint64_t Len = GetStringLengthH(V, PHIs, CharSize); 3173 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return 3174 // an empty string as a length. 3175 return Len == ~0ULL ? 1 : Len; 3176 } 3177 3178 /// \brief \p PN defines a loop-variant pointer to an object. Check if the 3179 /// previous iteration of the loop was referring to the same object as \p PN. 3180 static bool isSameUnderlyingObjectInLoop(const PHINode *PN, 3181 const LoopInfo *LI) { 3182 // Find the loop-defined value. 3183 Loop *L = LI->getLoopFor(PN->getParent()); 3184 if (PN->getNumIncomingValues() != 2) 3185 return true; 3186 3187 // Find the value from previous iteration. 3188 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); 3189 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3190 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); 3191 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3192 return true; 3193 3194 // If a new pointer is loaded in the loop, the pointer references a different 3195 // object in every iteration. E.g.: 3196 // for (i) 3197 // int *p = a[i]; 3198 // ... 3199 if (auto *Load = dyn_cast<LoadInst>(PrevValue)) 3200 if (!L->isLoopInvariant(Load->getPointerOperand())) 3201 return false; 3202 return true; 3203 } 3204 3205 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL, 3206 unsigned MaxLookup) { 3207 if (!V->getType()->isPointerTy()) 3208 return V; 3209 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { 3210 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 3211 V = GEP->getPointerOperand(); 3212 } else if (Operator::getOpcode(V) == Instruction::BitCast || 3213 Operator::getOpcode(V) == Instruction::AddrSpaceCast) { 3214 V = cast<Operator>(V)->getOperand(0); 3215 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 3216 if (GA->isInterposable()) 3217 return V; 3218 V = GA->getAliasee(); 3219 } else if (isa<AllocaInst>(V)) { 3220 // An alloca can't be further simplified. 3221 return V; 3222 } else { 3223 if (auto CS = CallSite(V)) 3224 if (Value *RV = CS.getReturnedArgOperand()) { 3225 V = RV; 3226 continue; 3227 } 3228 3229 // See if InstructionSimplify knows any relevant tricks. 
3230 if (Instruction *I = dyn_cast<Instruction>(V)) 3231 // TODO: Acquire a DominatorTree and AssumptionCache and use them. 3232 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) { 3233 V = Simplified; 3234 continue; 3235 } 3236 3237 return V; 3238 } 3239 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 3240 } 3241 return V; 3242 } 3243 3244 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects, 3245 const DataLayout &DL, LoopInfo *LI, 3246 unsigned MaxLookup) { 3247 SmallPtrSet<Value *, 4> Visited; 3248 SmallVector<Value *, 4> Worklist; 3249 Worklist.push_back(V); 3250 do { 3251 Value *P = Worklist.pop_back_val(); 3252 P = GetUnderlyingObject(P, DL, MaxLookup); 3253 3254 if (!Visited.insert(P).second) 3255 continue; 3256 3257 if (SelectInst *SI = dyn_cast<SelectInst>(P)) { 3258 Worklist.push_back(SI->getTrueValue()); 3259 Worklist.push_back(SI->getFalseValue()); 3260 continue; 3261 } 3262 3263 if (PHINode *PN = dyn_cast<PHINode>(P)) { 3264 // If this PHI changes the underlying object in every iteration of the 3265 // loop, don't look through it. Consider: 3266 // int **A; 3267 // for (i) { 3268 // Prev = Curr; // Prev = PHI (Prev_0, Curr) 3269 // Curr = A[i]; 3270 // *Prev, *Curr; 3271 // 3272 // Prev is tracking Curr one iteration behind so they refer to different 3273 // underlying objects. 3274 if (!LI || !LI->isLoopHeader(PN->getParent()) || 3275 isSameUnderlyingObjectInLoop(PN, LI)) 3276 for (Value *IncValue : PN->incoming_values()) 3277 Worklist.push_back(IncValue); 3278 continue; 3279 } 3280 3281 Objects.push_back(P); 3282 } while (!Worklist.empty()); 3283 } 3284 3285 /// Return true if the only users of this pointer are lifetime markers. 3286 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 3287 for (const User *U : V->users()) { 3288 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3289 if (!II) return false; 3290 3291 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 3292 II->getIntrinsicID() != Intrinsic::lifetime_end) 3293 return false; 3294 } 3295 return true; 3296 } 3297 3298 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 3299 const Instruction *CtxI, 3300 const DominatorTree *DT) { 3301 const Operator *Inst = dyn_cast<Operator>(V); 3302 if (!Inst) 3303 return false; 3304 3305 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 3306 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 3307 if (C->canTrap()) 3308 return false; 3309 3310 switch (Inst->getOpcode()) { 3311 default: 3312 return true; 3313 case Instruction::UDiv: 3314 case Instruction::URem: { 3315 // x / y is undefined if y == 0. 3316 const APInt *V; 3317 if (match(Inst->getOperand(1), m_APInt(V))) 3318 return *V != 0; 3319 return false; 3320 } 3321 case Instruction::SDiv: 3322 case Instruction::SRem: { 3323 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 3324 const APInt *Numerator, *Denominator; 3325 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 3326 return false; 3327 // We cannot hoist this division if the denominator is 0. 3328 if (*Denominator == 0) 3329 return false; 3330 // It's safe to hoist if the denominator is not 0 or -1. 3331 if (*Denominator != -1) 3332 return true; 3333 // At this point we know that the denominator is -1. It is safe to hoist as 3334 // long we know that the numerator is not INT_MIN. 3335 if (match(Inst->getOperand(0), m_APInt(Numerator))) 3336 return !Numerator->isMinSignedValue(); 3337 // The numerator *might* be MinSignedValue. 
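// (Worked example: for i8, -128 sdiv -1 would have to produce +128, which
// is not representable, so hoisting is only safe when the numerator is
// provably not the minimum signed value.)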
3338 return false; 3339 } 3340 case Instruction::Load: { 3341 const LoadInst *LI = cast<LoadInst>(Inst); 3342 if (!LI->isUnordered() || 3343 // Speculative load may create a race that did not exist in the source. 3344 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) || 3345 // Speculative load may load data from dirty regions. 3346 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress)) 3347 return false; 3348 const DataLayout &DL = LI->getModule()->getDataLayout(); 3349 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(), 3350 LI->getAlignment(), DL, CtxI, DT); 3351 } 3352 case Instruction::Call: { 3353 auto *CI = cast<const CallInst>(Inst); 3354 const Function *Callee = CI->getCalledFunction(); 3355 3356 // The called function could have undefined behavior or side-effects, even 3357 // if marked readnone nounwind. 3358 return Callee && Callee->isSpeculatable(); 3359 } 3360 case Instruction::VAArg: 3361 case Instruction::Alloca: 3362 case Instruction::Invoke: 3363 case Instruction::PHI: 3364 case Instruction::Store: 3365 case Instruction::Ret: 3366 case Instruction::Br: 3367 case Instruction::IndirectBr: 3368 case Instruction::Switch: 3369 case Instruction::Unreachable: 3370 case Instruction::Fence: 3371 case Instruction::AtomicRMW: 3372 case Instruction::AtomicCmpXchg: 3373 case Instruction::LandingPad: 3374 case Instruction::Resume: 3375 case Instruction::CatchSwitch: 3376 case Instruction::CatchPad: 3377 case Instruction::CatchRet: 3378 case Instruction::CleanupPad: 3379 case Instruction::CleanupRet: 3380 return false; // Misc instructions which have effects 3381 } 3382 } 3383 3384 bool llvm::mayBeMemoryDependent(const Instruction &I) { 3385 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 3386 } 3387 3388 /// Return true if we know that the specified value is never null. 3389 bool llvm::isKnownNonNull(const Value *V) { 3390 assert(V->getType()->isPointerTy() && "V must be pointer type"); 3391 3392 // Alloca never returns null, malloc might. 3393 if (isa<AllocaInst>(V)) return true; 3394 3395 // A byval, inalloca, or nonnull argument is never null. 3396 if (const Argument *A = dyn_cast<Argument>(V)) 3397 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr(); 3398 3399 // A global variable in address space 0 is non null unless extern weak 3400 // or an absolute symbol reference. Other address spaces may have null as a 3401 // valid address for a global, so we can't assume anything. 3402 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) 3403 return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && 3404 GV->getType()->getAddressSpace() == 0; 3405 3406 // A Load tagged with nonnull metadata is never null. 
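// For instance (illustrative IR; !0 names an empty metadata node, !{}):
//   %p = load i8*, i8** %pp, !nonnull !0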
3407 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 3408 return LI->getMetadata(LLVMContext::MD_nonnull); 3409 3410 if (auto CS = ImmutableCallSite(V)) 3411 if (CS.isReturnNonNull()) 3412 return true; 3413 3414 return false; 3415 } 3416 3417 static bool isKnownNonNullFromDominatingCondition(const Value *V, 3418 const Instruction *CtxI, 3419 const DominatorTree *DT) { 3420 assert(V->getType()->isPointerTy() && "V must be pointer type"); 3421 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull"); 3422 assert(CtxI && "Context instruction required for analysis"); 3423 assert(DT && "Dominator tree required for analysis"); 3424 3425 unsigned NumUsesExplored = 0; 3426 for (auto *U : V->users()) { 3427 // Avoid massive lists 3428 if (NumUsesExplored >= DomConditionsMaxUses) 3429 break; 3430 NumUsesExplored++; 3431 3432 // If the value is used as an argument to a call or invoke, then argument 3433 // attributes may provide an answer about null-ness. 3434 if (auto CS = ImmutableCallSite(U)) 3435 if (auto *CalledFunc = CS.getCalledFunction()) 3436 for (const Argument &Arg : CalledFunc->args()) 3437 if (CS.getArgOperand(Arg.getArgNo()) == V && 3438 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI)) 3439 return true; 3440 3441 // Consider only compare instructions uniquely controlling a branch 3442 CmpInst::Predicate Pred; 3443 if (!match(const_cast<User *>(U), 3444 m_c_ICmp(Pred, m_Specific(V), m_Zero())) || 3445 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)) 3446 continue; 3447 3448 for (auto *CmpU : U->users()) { 3449 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) { 3450 assert(BI->isConditional() && "uses a comparison!"); 3451 3452 BasicBlock *NonNullSuccessor = 3453 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0); 3454 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); 3455 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) 3456 return true; 3457 } else if (Pred == ICmpInst::ICMP_NE && 3458 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) && 3459 DT->dominates(cast<Instruction>(CmpU), CtxI)) { 3460 return true; 3461 } 3462 } 3463 } 3464 3465 return false; 3466 } 3467 3468 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI, 3469 const DominatorTree *DT) { 3470 if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V)) 3471 return false; 3472 3473 if (isKnownNonNull(V)) 3474 return true; 3475 3476 if (!CtxI || !DT) 3477 return false; 3478 3479 return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT); 3480 } 3481 3482 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS, 3483 const Value *RHS, 3484 const DataLayout &DL, 3485 AssumptionCache *AC, 3486 const Instruction *CxtI, 3487 const DominatorTree *DT) { 3488 // Multiplying n * m significant bits yields a result of n + m significant 3489 // bits. If the total number of significant bits does not exceed the 3490 // result bit width (minus 1), there is no overflow. 3491 // This means if we have enough leading zero bits in the operands 3492 // we can guarantee that the result does not overflow. 3493 // Ref: "Hacker's Delight" by Henry Warren 3494 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3495 KnownBits LHSKnown(BitWidth); 3496 KnownBits RHSKnown(BitWidth); 3497 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT); 3498 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT); 3499 // Note that underestimating the number of zero bits gives a more 3500 // conservative answer. 
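// (Worked example: for i8 operands that each have at least four known
// leading zeros, each value is at most 15, so the product is at most
// 15 * 15 = 225 < 256; correspondingly ZeroBits = 4 + 4 >= 8 below and we
// conclude NeverOverflows.)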
3501 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3502 RHSKnown.countMinLeadingZeros();
3503 // First handle the easy case: if we have enough zero bits there's
3504 // definitely no overflow.
3505 if (ZeroBits >= BitWidth)
3506 return OverflowResult::NeverOverflows;
3507
3508 // Get the largest possible values for each operand.
3509 APInt LHSMax = ~LHSKnown.Zero;
3510 APInt RHSMax = ~RHSKnown.Zero;
3511
3512 // We know the multiply operation doesn't overflow if multiplying the
3513 // maximum values for each operand does not overflow.
3514 bool MaxOverflow;
3515 (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3516 if (!MaxOverflow)
3517 return OverflowResult::NeverOverflows;
3518
3519 // We know it always overflows if multiplying the smallest possible values
3520 // for the operands also results in overflow.
3521 bool MinOverflow;
3522 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3523 if (MinOverflow)
3524 return OverflowResult::AlwaysOverflows;
3525
3526 return OverflowResult::MayOverflow;
3527 }
3528
3529 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3530 const Value *RHS,
3531 const DataLayout &DL,
3532 AssumptionCache *AC,
3533 const Instruction *CxtI,
3534 const DominatorTree *DT) {
3535 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3536 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3537 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3538
3539 if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3540 // The sign bit is set in both cases: this MUST overflow.
3542 return OverflowResult::AlwaysOverflows;
3543 }
3544
3545 if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3546 // The sign bit is clear in both cases: this CANNOT overflow.
3548 return OverflowResult::NeverOverflows;
3549 }
3550 }
3551
3552 return OverflowResult::MayOverflow;
3553 }
3554
3555 /// \brief Return true if we can prove that adding two values with the given
3556 /// known bits will not overflow.
3557 /// Otherwise return false.
3558 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3559 const KnownBits &RHSKnown) {
3560 // Addition of two 2's complement numbers having opposite signs will never
3561 // overflow.
3562 if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3563 (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3564 return true;
3565
3566 // If either of the values is known to be non-negative, adding them can only
3567 // overflow if the other is also non-negative, so we can assume that.
3568 // Two non-negative numbers will only overflow if there is a carry to the
3569 // sign bit, so we can check if even when the values are as big as possible
3570 // there is no overflow to the sign bit.
3571 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3572 APInt MaxLHS = ~LHSKnown.Zero;
3573 MaxLHS.clearSignBit();
3574 APInt MaxRHS = ~RHSKnown.Zero;
3575 MaxRHS.clearSignBit();
3576 APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3577 return Result.isSignBitClear();
3578 }
3579
3580 // If either of the values is known to be negative, adding them can only
3581 // overflow if the other is also negative, so we can assume that.
3582 // Two negative numbers will only overflow if there is no carry into the
3583 // sign bit, so we can check whether, even when the values are as small as
3584 // possible, there is a carry into the sign bit.
3585 if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3586 APInt MinLHS = LHSKnown.One;
3587 MinLHS.clearSignBit();
3588 APInt MinRHS = RHSKnown.One;
3589 MinRHS.clearSignBit();
3590 APInt Result = std::move(MinLHS) + std::move(MinRHS);
3591 return Result.isSignBitSet();
3592 }
3593
3594 // If we reached here it means that we know nothing about the sign bits.
3595 // In this case we can't know if there will be an overflow, since by
3596 // changing the sign bits any two values can be made to overflow.
3597 return false;
3598 }
3599
3600 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3601 const Value *RHS,
3602 const AddOperator *Add,
3603 const DataLayout &DL,
3604 AssumptionCache *AC,
3605 const Instruction *CxtI,
3606 const DominatorTree *DT) {
3607 if (Add && Add->hasNoSignedWrap()) {
3608 return OverflowResult::NeverOverflows;
3609 }
3610
3611 // If LHS and RHS each have at least two sign bits, the addition will look
3612 // like
3613 //
3614 // XX..... +
3615 // YY.....
3616 //
3617 // If the carry into the most significant position is 0, X and Y can't both
3618 // be 1 and therefore the carry out of the addition is also 0.
3619 //
3620 // If the carry into the most significant position is 1, X and Y can't both
3621 // be 0 and therefore the carry out of the addition is also 1.
3622 //
3623 // Since the carry into the most significant position is always equal to
3624 // the carry out of the addition, there is no signed overflow.
3625 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3626 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3627 return OverflowResult::NeverOverflows;
3628
3629 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3630 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3631
3632 if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3633 return OverflowResult::NeverOverflows;
3634
3635 // The remaining code needs Add to be available. Return early if it is not.
3636 if (!Add)
3637 return OverflowResult::MayOverflow;
3638
3639 // If the sign of Add is the same as at least one of the operands, this add
3640 // CANNOT overflow. This is particularly useful when the sum is
3641 // @llvm.assume'ed non-negative rather than proved so from analyzing its
3642 // operands.
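// For instance (illustrative IR; %a and %y are hypothetical values):
//   %x   = zext i16 %a to i32              ; LHS known non-negative
//   %add = add i32 %x, %y
//   %cmp = icmp sge i32 %add, 0
//   call void @llvm.assume(i1 %cmp)        ; the sum is assumed non-negative
// Operand analysis alone proves nothing about %y, but the assume pins down
// the sign of the sum itself, which is enough for the check below.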
3643 bool LHSOrRHSKnownNonNegative =
3644 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3645 bool LHSOrRHSKnownNegative =
3646 (LHSKnown.isNegative() || RHSKnown.isNegative());
3647 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3648 KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3649 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3650 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3651 return OverflowResult::NeverOverflows;
3652 }
3653 }
3654
3655 return OverflowResult::MayOverflow;
3656 }
3657
3658 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3659 const DominatorTree &DT) {
3660 #ifndef NDEBUG
3661 auto IID = II->getIntrinsicID();
3662 assert((IID == Intrinsic::sadd_with_overflow ||
3663 IID == Intrinsic::uadd_with_overflow ||
3664 IID == Intrinsic::ssub_with_overflow ||
3665 IID == Intrinsic::usub_with_overflow ||
3666 IID == Intrinsic::smul_with_overflow ||
3667 IID == Intrinsic::umul_with_overflow) &&
3668 "Not an overflow intrinsic!");
3669 #endif
3670
3671 SmallVector<const BranchInst *, 2> GuardingBranches;
3672 SmallVector<const ExtractValueInst *, 2> Results;
3673
3674 for (const User *U : II->users()) {
3675 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3676 assert(EVI->getNumIndices() == 1 && "Obvious from II's type");
3677
3678 if (EVI->getIndices()[0] == 0)
3679 Results.push_back(EVI);
3680 else {
3681 assert(EVI->getIndices()[0] == 1 && "Obvious from II's type");
3682
3683 for (const auto *U : EVI->users())
3684 if (const auto *B = dyn_cast<BranchInst>(U)) {
3685 assert(B->isConditional() && "How else is it using an i1?");
3686 GuardingBranches.push_back(B);
3687 }
3688 }
3689 } else {
3690 // We are using the aggregate directly in a way we don't want to analyze
3691 // here (storing it to a global, say).
3692 return false;
3693 }
3694 }
3695
3696 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3697 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3698 if (!NoWrapEdge.isSingleEdge())
3699 return false;
3700
3701 // Check if all users of the add are provably no-wrap.
3702 for (const auto *Result : Results) {
3703 // If the extractvalue itself is not executed on overflow, then we don't
3704 // need to check each use separately, since domination is transitive.
3705 if (DT.dominates(NoWrapEdge, Result->getParent()))
3706 continue;
3707
3708 for (auto &RU : Result->uses())
3709 if (!DT.dominates(NoWrapEdge, RU))
3710 return false;
3711 }
3712
3713 return true;
3714 };
3715
3716 return any_of(GuardingBranches, AllUsesGuardedByBranch);
3717 }
3718
3719
3720 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3721 const DataLayout &DL,
3722 AssumptionCache *AC,
3723 const Instruction *CxtI,
3724 const DominatorTree *DT) {
3725 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3726 Add, DL, AC, CxtI, DT);
3727 }
3728
3729 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3730 const Value *RHS,
3731 const DataLayout &DL,
3732 AssumptionCache *AC,
3733 const Instruction *CxtI,
3734 const DominatorTree *DT) {
3735 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3736 }
3737
3738 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3739 // A memory operation returns normally if it isn't volatile. A volatile
3740 // operation is allowed to trap.
3741 // 3742 // An atomic operation isn't guaranteed to return in a reasonable amount of 3743 // time because it's possible for another thread to interfere with it for an 3744 // arbitrary length of time, but programs aren't allowed to rely on that. 3745 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) 3746 return !LI->isVolatile(); 3747 if (const StoreInst *SI = dyn_cast<StoreInst>(I)) 3748 return !SI->isVolatile(); 3749 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I)) 3750 return !CXI->isVolatile(); 3751 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) 3752 return !RMWI->isVolatile(); 3753 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I)) 3754 return !MII->isVolatile(); 3755 3756 // If there is no successor, then execution can't transfer to it. 3757 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I)) 3758 return !CRI->unwindsToCaller(); 3759 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) 3760 return !CatchSwitch->unwindsToCaller(); 3761 if (isa<ResumeInst>(I)) 3762 return false; 3763 if (isa<ReturnInst>(I)) 3764 return false; 3765 if (isa<UnreachableInst>(I)) 3766 return false; 3767 3768 // Calls can throw, or contain an infinite loop, or kill the process. 3769 if (auto CS = ImmutableCallSite(I)) { 3770 // Call sites that throw have implicit non-local control flow. 3771 if (!CS.doesNotThrow()) 3772 return false; 3773 3774 // Non-throwing call sites can loop infinitely, call exit/pthread_exit 3775 // etc. and thus not return. However, LLVM already assumes that 3776 // 3777 // - Thread exiting actions are modeled as writes to memory invisible to 3778 // the program. 3779 // 3780 // - Loops that don't have side effects (side effects are volatile/atomic 3781 // stores and IO) always terminate (see http://llvm.org/PR965). 3782 // Furthermore IO itself is also modeled as writes to memory invisible to 3783 // the program. 3784 // 3785 // We rely on those assumptions here, and use the memory effects of the call 3786 // target as a proxy for checking that it always returns. 3787 3788 // FIXME: This isn't aggressive enough; a call which only writes to a global 3789 // is guaranteed to return. 3790 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() || 3791 match(I, m_Intrinsic<Intrinsic::assume>()); 3792 } 3793 3794 // Other instructions return normally. 3795 return true; 3796 } 3797 3798 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I, 3799 const Loop *L) { 3800 // The loop header is guaranteed to be executed for every iteration. 3801 // 3802 // FIXME: Relax this constraint to cover all basic blocks that are 3803 // guaranteed to be executed at every iteration. 3804 if (I->getParent() != L->getHeader()) return false; 3805 3806 for (const Instruction &LI : *L->getHeader()) { 3807 if (&LI == I) return true; 3808 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false; 3809 } 3810 llvm_unreachable("Instruction not contained in its own parent basic block."); 3811 } 3812 3813 bool llvm::propagatesFullPoison(const Instruction *I) { 3814 switch (I->getOpcode()) { 3815 case Instruction::Add: 3816 case Instruction::Sub: 3817 case Instruction::Xor: 3818 case Instruction::Trunc: 3819 case Instruction::BitCast: 3820 case Instruction::AddrSpaceCast: 3821 case Instruction::Mul: 3822 case Instruction::Shl: 3823 case Instruction::GetElementPtr: 3824 // These operations all propagate poison unconditionally. 
Note that poison 3825 // is not any particular value, so xor or subtraction of poison with 3826 // itself still yields poison, not zero. 3827 return true; 3828 3829 case Instruction::AShr: 3830 case Instruction::SExt: 3831 // For these operations, one bit of the input is replicated across 3832 // multiple output bits. A replicated poison bit is still poison. 3833 return true; 3834 3835 case Instruction::ICmp: 3836 // Comparing poison with any value yields poison. This is why, for 3837 // instance, x s< (x +nsw 1) can be folded to true. 3838 return true; 3839 3840 default: 3841 return false; 3842 } 3843 } 3844 3845 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) { 3846 switch (I->getOpcode()) { 3847 case Instruction::Store: 3848 return cast<StoreInst>(I)->getPointerOperand(); 3849 3850 case Instruction::Load: 3851 return cast<LoadInst>(I)->getPointerOperand(); 3852 3853 case Instruction::AtomicCmpXchg: 3854 return cast<AtomicCmpXchgInst>(I)->getPointerOperand(); 3855 3856 case Instruction::AtomicRMW: 3857 return cast<AtomicRMWInst>(I)->getPointerOperand(); 3858 3859 case Instruction::UDiv: 3860 case Instruction::SDiv: 3861 case Instruction::URem: 3862 case Instruction::SRem: 3863 return I->getOperand(1); 3864 3865 default: 3866 return nullptr; 3867 } 3868 } 3869 3870 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) { 3871 // We currently only look for uses of poison values within the same basic 3872 // block, as that makes it easier to guarantee that the uses will be 3873 // executed given that PoisonI is executed. 3874 // 3875 // FIXME: Expand this to consider uses beyond the same basic block. To do 3876 // this, look out for the distinction between post-dominance and strong 3877 // post-dominance. 3878 const BasicBlock *BB = PoisonI->getParent(); 3879 3880 // Set of instructions that we have proved will yield poison if PoisonI 3881 // does. 3882 SmallSet<const Value *, 16> YieldsPoison; 3883 SmallSet<const BasicBlock *, 4> Visited; 3884 YieldsPoison.insert(PoisonI); 3885 Visited.insert(PoisonI->getParent()); 3886 3887 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end(); 3888 3889 unsigned Iter = 0; 3890 while (Iter++ < MaxDepth) { 3891 for (auto &I : make_range(Begin, End)) { 3892 if (&I != PoisonI) { 3893 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I); 3894 if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) 3895 return true; 3896 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 3897 return false; 3898 } 3899 3900 // Mark poison that propagates from I through uses of I. 3901 if (YieldsPoison.count(&I)) { 3902 for (const User *User : I.users()) { 3903 const Instruction *UserI = cast<Instruction>(User); 3904 if (propagatesFullPoison(UserI)) 3905 YieldsPoison.insert(User); 3906 } 3907 } 3908 } 3909 3910 if (auto *NextBB = BB->getSingleSuccessor()) { 3911 if (Visited.insert(NextBB).second) { 3912 BB = NextBB; 3913 Begin = BB->getFirstNonPHI()->getIterator(); 3914 End = BB->end(); 3915 continue; 3916 } 3917 } 3918 3919 break; 3920 }; 3921 return false; 3922 } 3923 3924 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) { 3925 if (FMF.noNaNs()) 3926 return true; 3927 3928 if (auto *C = dyn_cast<ConstantFP>(V)) 3929 return !C->isNaN(); 3930 return false; 3931 } 3932 3933 static bool isKnownNonZero(const Value *V) { 3934 if (auto *C = dyn_cast<ConstantFP>(V)) 3935 return !C->isZero(); 3936 return false; 3937 } 3938 3939 /// Match non-obvious integer minimum and maximum sequences. 
3940 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, 3941 Value *CmpLHS, Value *CmpRHS, 3942 Value *TrueVal, Value *FalseVal, 3943 Value *&LHS, Value *&RHS) { 3944 // Assume success. If there's no match, callers should not use these anyway. 3945 LHS = TrueVal; 3946 RHS = FalseVal; 3947 3948 // Recognize variations of: 3949 // CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v))) 3950 const APInt *C1; 3951 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 3952 const APInt *C2; 3953 3954 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 3955 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 3956 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 3957 return {SPF_SMAX, SPNB_NA, false}; 3958 3959 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 3960 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 3961 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 3962 return {SPF_SMIN, SPNB_NA, false}; 3963 3964 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 3965 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 3966 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 3967 return {SPF_UMAX, SPNB_NA, false}; 3968 3969 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 3970 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 3971 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 3972 return {SPF_UMIN, SPNB_NA, false}; 3973 } 3974 3975 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) 3976 return {SPF_UNKNOWN, SPNB_NA, false}; 3977 3978 // Z = X -nsw Y 3979 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0) 3980 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0) 3981 if (match(TrueVal, m_Zero()) && 3982 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 3983 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 3984 3985 // Z = X -nsw Y 3986 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0) 3987 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0) 3988 if (match(FalseVal, m_Zero()) && 3989 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 3990 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 3991 3992 if (!match(CmpRHS, m_APInt(C1))) 3993 return {SPF_UNKNOWN, SPNB_NA, false}; 3994 3995 // An unsigned min/max can be written with a signed compare. 3996 const APInt *C2; 3997 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || 3998 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { 3999 // Is the sign bit set? 4000 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX 4001 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN 4002 if (Pred == CmpInst::ICMP_SLT && *C1 == 0 && C2->isMaxSignedValue()) 4003 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4004 4005 // Is the sign bit clear? 4006 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX 4007 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN 4008 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() && 4009 C2->isMinSignedValue()) 4010 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4011 } 4012 4013 // Look through 'not' ops to find disguised signed min/max. 4014 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C) 4015 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C) 4016 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) && 4017 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2) 4018 return {Pred == CmpInst::ICMP_SGT ? 
SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 4019 4020 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X) 4021 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X) 4022 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) && 4023 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2) 4024 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 4025 4026 return {SPF_UNKNOWN, SPNB_NA, false}; 4027 } 4028 4029 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, 4030 FastMathFlags FMF, 4031 Value *CmpLHS, Value *CmpRHS, 4032 Value *TrueVal, Value *FalseVal, 4033 Value *&LHS, Value *&RHS) { 4034 LHS = CmpLHS; 4035 RHS = CmpRHS; 4036 4037 // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may 4038 // return inconsistent results between implementations. 4039 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0 4040 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) 4041 // Therefore we behave conservatively and only proceed if at least one of the 4042 // operands is known to not be zero, or if we don't care about signed zeroes. 4043 switch (Pred) { 4044 default: break; 4045 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE: 4046 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE: 4047 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && 4048 !isKnownNonZero(CmpRHS)) 4049 return {SPF_UNKNOWN, SPNB_NA, false}; 4050 } 4051 4052 SelectPatternNaNBehavior NaNBehavior = SPNB_NA; 4053 bool Ordered = false; 4054 4055 // When given one NaN and one non-NaN input: 4056 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. 4057 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the 4058 // ordered comparison fails), which could be NaN or non-NaN. 4059 // so here we discover exactly what NaN behavior is required/accepted. 4060 if (CmpInst::isFPPredicate(Pred)) { 4061 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); 4062 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); 4063 4064 if (LHSSafe && RHSSafe) { 4065 // Both operands are known non-NaN. 4066 NaNBehavior = SPNB_RETURNS_ANY; 4067 } else if (CmpInst::isOrdered(Pred)) { 4068 // An ordered comparison will return false when given a NaN, so it 4069 // returns the RHS. 4070 Ordered = true; 4071 if (LHSSafe) 4072 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 4073 NaNBehavior = SPNB_RETURNS_NAN; 4074 else if (RHSSafe) 4075 NaNBehavior = SPNB_RETURNS_OTHER; 4076 else 4077 // Completely unsafe. 4078 return {SPF_UNKNOWN, SPNB_NA, false}; 4079 } else { 4080 Ordered = false; 4081 // An unordered comparison will return true when given a NaN, so it 4082 // returns the LHS. 4083 if (LHSSafe) 4084 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 4085 NaNBehavior = SPNB_RETURNS_OTHER; 4086 else if (RHSSafe) 4087 NaNBehavior = SPNB_RETURNS_NAN; 4088 else 4089 // Completely unsafe. 4090 return {SPF_UNKNOWN, SPNB_NA, false}; 4091 } 4092 } 4093 4094 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 4095 std::swap(CmpLHS, CmpRHS); 4096 Pred = CmpInst::getSwappedPredicate(Pred); 4097 if (NaNBehavior == SPNB_RETURNS_NAN) 4098 NaNBehavior = SPNB_RETURNS_OTHER; 4099 else if (NaNBehavior == SPNB_RETURNS_OTHER) 4100 NaNBehavior = SPNB_RETURNS_NAN; 4101 Ordered = !Ordered; 4102 } 4103 4104 // ([if]cmp X, Y) ? X : Y 4105 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 4106 switch (Pred) { 4107 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 
4108 case ICmpInst::ICMP_UGT: 4109 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; 4110 case ICmpInst::ICMP_SGT: 4111 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; 4112 case ICmpInst::ICMP_ULT: 4113 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; 4114 case ICmpInst::ICMP_SLT: 4115 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; 4116 case FCmpInst::FCMP_UGT: 4117 case FCmpInst::FCMP_UGE: 4118 case FCmpInst::FCMP_OGT: 4119 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; 4120 case FCmpInst::FCMP_ULT: 4121 case FCmpInst::FCMP_ULE: 4122 case FCmpInst::FCMP_OLT: 4123 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; 4124 } 4125 } 4126 4127 const APInt *C1; 4128 if (match(CmpRHS, m_APInt(C1))) { 4129 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) || 4130 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) { 4131 4132 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X 4133 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X 4134 if (Pred == ICmpInst::ICMP_SGT && (*C1 == 0 || C1->isAllOnesValue())) { 4135 return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false}; 4136 } 4137 4138 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X 4139 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X 4140 if (Pred == ICmpInst::ICMP_SLT && (*C1 == 0 || *C1 == 1)) { 4141 return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false}; 4142 } 4143 } 4144 } 4145 4146 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS); 4147 } 4148 4149 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, 4150 Instruction::CastOps *CastOp) { 4151 auto *Cast1 = dyn_cast<CastInst>(V1); 4152 if (!Cast1) 4153 return nullptr; 4154 4155 *CastOp = Cast1->getOpcode(); 4156 Type *SrcTy = Cast1->getSrcTy(); 4157 if (auto *Cast2 = dyn_cast<CastInst>(V2)) { 4158 // If V1 and V2 are both the same cast from the same type, look through V1. 4159 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy()) 4160 return Cast2->getOperand(0); 4161 return nullptr; 4162 } 4163 4164 auto *C = dyn_cast<Constant>(V2); 4165 if (!C) 4166 return nullptr; 4167 4168 Constant *CastedTo = nullptr; 4169 switch (*CastOp) { 4170 case Instruction::ZExt: 4171 if (CmpI->isUnsigned()) 4172 CastedTo = ConstantExpr::getTrunc(C, SrcTy); 4173 break; 4174 case Instruction::SExt: 4175 if (CmpI->isSigned()) 4176 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true); 4177 break; 4178 case Instruction::Trunc: 4179 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned()); 4180 break; 4181 case Instruction::FPTrunc: 4182 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true); 4183 break; 4184 case Instruction::FPExt: 4185 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true); 4186 break; 4187 case Instruction::FPToUI: 4188 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true); 4189 break; 4190 case Instruction::FPToSI: 4191 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true); 4192 break; 4193 case Instruction::UIToFP: 4194 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true); 4195 break; 4196 case Instruction::SIToFP: 4197 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true); 4198 break; 4199 default: 4200 break; 4201 } 4202 4203 if (!CastedTo) 4204 return nullptr; 4205 4206 // Make sure the cast doesn't lose any information. 
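// (Worked example: when looking through a zext from i8 and C is the i32
// constant 257, CastedTo = trunc(257) = 1, but casting 1 back yields
// 1 != 257, so C has no i8 counterpart and we return nullptr.)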
4207 Constant *CastedBack = 4208 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true); 4209 if (CastedBack != C) 4210 return nullptr; 4211 4212 return CastedTo; 4213 } 4214 4215 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, 4216 Instruction::CastOps *CastOp) { 4217 SelectInst *SI = dyn_cast<SelectInst>(V); 4218 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false}; 4219 4220 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); 4221 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false}; 4222 4223 CmpInst::Predicate Pred = CmpI->getPredicate(); 4224 Value *CmpLHS = CmpI->getOperand(0); 4225 Value *CmpRHS = CmpI->getOperand(1); 4226 Value *TrueVal = SI->getTrueValue(); 4227 Value *FalseVal = SI->getFalseValue(); 4228 FastMathFlags FMF; 4229 if (isa<FPMathOperator>(CmpI)) 4230 FMF = CmpI->getFastMathFlags(); 4231 4232 // Bail out early. 4233 if (CmpI->isEquality()) 4234 return {SPF_UNKNOWN, SPNB_NA, false}; 4235 4236 // Deal with type mismatches. 4237 if (CastOp && CmpLHS->getType() != TrueVal->getType()) { 4238 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) 4239 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 4240 cast<CastInst>(TrueVal)->getOperand(0), C, 4241 LHS, RHS); 4242 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) 4243 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 4244 C, cast<CastInst>(FalseVal)->getOperand(0), 4245 LHS, RHS); 4246 } 4247 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, 4248 LHS, RHS); 4249 } 4250 4251 /// Return true if "icmp Pred LHS RHS" is always true. 4252 static bool isTruePredicate(CmpInst::Predicate Pred, 4253 const Value *LHS, const Value *RHS, 4254 const DataLayout &DL, unsigned Depth, 4255 AssumptionCache *AC, const Instruction *CxtI, 4256 const DominatorTree *DT) { 4257 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!"); 4258 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS) 4259 return true; 4260 4261 switch (Pred) { 4262 default: 4263 return false; 4264 4265 case CmpInst::ICMP_SLE: { 4266 const APInt *C; 4267 4268 // LHS s<= LHS +_{nsw} C if C >= 0 4269 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C)))) 4270 return !C->isNegative(); 4271 return false; 4272 } 4273 4274 case CmpInst::ICMP_ULE: { 4275 const APInt *C; 4276 4277 // LHS u<= LHS +_{nuw} C for any C 4278 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C)))) 4279 return true; 4280 4281 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB) 4282 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B, 4283 const Value *&X, 4284 const APInt *&CA, const APInt *&CB) { 4285 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) && 4286 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB)))) 4287 return true; 4288 4289 // If X & C == 0 then (X | C) == X +_{nuw} C 4290 if (match(A, m_Or(m_Value(X), m_APInt(CA))) && 4291 match(B, m_Or(m_Specific(X), m_APInt(CB)))) { 4292 KnownBits Known(CA->getBitWidth()); 4293 computeKnownBits(X, Known, DL, Depth + 1, AC, CxtI, DT); 4294 4295 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero)) 4296 return true; 4297 } 4298 4299 return false; 4300 }; 4301 4302 const Value *X; 4303 const APInt *CLHS, *CRHS; 4304 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS)) 4305 return CLHS->ule(*CRHS); 4306 4307 return false; 4308 } 4309 } 4310 } 4311 4312 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred 4313 /// ALHS ARHS" is true. Otherwise, return None. 
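/// For instance (hypothetical values), A = "(x +nsw 1) s<= y" implies
/// B = "x s<= y": the SLT/SLE case below verifies that x s<= (x +nsw 1)
/// (an nsw add of a non-negative constant) and that y s<= y.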
4314 static Optional<bool> 4315 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, 4316 const Value *ARHS, const Value *BLHS, 4317 const Value *BRHS, const DataLayout &DL, 4318 unsigned Depth, AssumptionCache *AC, 4319 const Instruction *CxtI, const DominatorTree *DT) { 4320 switch (Pred) { 4321 default: 4322 return None; 4323 4324 case CmpInst::ICMP_SLT: 4325 case CmpInst::ICMP_SLE: 4326 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI, 4327 DT) && 4328 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT)) 4329 return true; 4330 return None; 4331 4332 case CmpInst::ICMP_ULT: 4333 case CmpInst::ICMP_ULE: 4334 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI, 4335 DT) && 4336 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT)) 4337 return true; 4338 return None; 4339 } 4340 } 4341 4342 /// Return true if the operands of the two compares match. IsSwappedOps is true 4343 /// when the operands match, but are swapped. 4344 static bool isMatchingOps(const Value *ALHS, const Value *ARHS, 4345 const Value *BLHS, const Value *BRHS, 4346 bool &IsSwappedOps) { 4347 4348 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS); 4349 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS); 4350 return IsMatchingOps || IsSwappedOps; 4351 } 4352 4353 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is 4354 /// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS 4355 /// BRHS" is false. Otherwise, return None if we can't infer anything. 4356 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred, 4357 const Value *ALHS, 4358 const Value *ARHS, 4359 CmpInst::Predicate BPred, 4360 const Value *BLHS, 4361 const Value *BRHS, 4362 bool IsSwappedOps) { 4363 // Canonicalize the operands so they're matching. 4364 if (IsSwappedOps) { 4365 std::swap(BLHS, BRHS); 4366 BPred = ICmpInst::getSwappedPredicate(BPred); 4367 } 4368 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred)) 4369 return true; 4370 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred)) 4371 return false; 4372 4373 return None; 4374 } 4375 4376 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is 4377 /// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS 4378 /// C2" is false. Otherwise, return None if we can't infer anything. 4379 static Optional<bool> 4380 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS, 4381 const ConstantInt *C1, 4382 CmpInst::Predicate BPred, 4383 const Value *BLHS, const ConstantInt *C2) { 4384 assert(ALHS == BLHS && "LHS operands must match."); 4385 ConstantRange DomCR = 4386 ConstantRange::makeExactICmpRegion(APred, C1->getValue()); 4387 ConstantRange CR = 4388 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue()); 4389 ConstantRange Intersection = DomCR.intersectWith(CR); 4390 ConstantRange Difference = DomCR.difference(CR); 4391 if (Intersection.isEmptySet()) 4392 return false; 4393 if (Difference.isEmptySet()) 4394 return true; 4395 return None; 4396 } 4397 4398 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS, 4399 const DataLayout &DL, bool InvertAPred, 4400 unsigned Depth, AssumptionCache *AC, 4401 const Instruction *CxtI, 4402 const DominatorTree *DT) { 4403 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example. 
4404 if (LHS->getType() != RHS->getType()) 4405 return None; 4406 4407 Type *OpTy = LHS->getType(); 4408 assert(OpTy->getScalarType()->isIntegerTy(1)); 4409 4410 // LHS ==> RHS by definition 4411 if (!InvertAPred && LHS == RHS) 4412 return true; 4413 4414 if (OpTy->isVectorTy()) 4415 // TODO: extending the code below to handle vectors 4416 return None; 4417 assert(OpTy->isIntegerTy(1) && "implied by above"); 4418 4419 ICmpInst::Predicate APred, BPred; 4420 Value *ALHS, *ARHS; 4421 Value *BLHS, *BRHS; 4422 4423 if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) || 4424 !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS)))) 4425 return None; 4426 4427 if (InvertAPred) 4428 APred = CmpInst::getInversePredicate(APred); 4429 4430 // Can we infer anything when the two compares have matching operands? 4431 bool IsSwappedOps; 4432 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) { 4433 if (Optional<bool> Implication = isImpliedCondMatchingOperands( 4434 APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps)) 4435 return Implication; 4436 // No amount of additional analysis will infer the second condition, so 4437 // early exit. 4438 return None; 4439 } 4440 4441 // Can we infer anything when the LHS operands match and the RHS operands are 4442 // constants (not necessarily matching)? 4443 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) { 4444 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands( 4445 APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS, 4446 cast<ConstantInt>(BRHS))) 4447 return Implication; 4448 // No amount of additional analysis will infer the second condition, so 4449 // early exit. 4450 return None; 4451 } 4452 4453 if (APred == BPred) 4454 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC, 4455 CxtI, DT); 4456 4457 return None; 4458 } 4459
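// A sketch of how the constant-operand path above plays out (illustrative
// IR; %x is a hypothetical value):
//   %a = icmp ult i32 %x, 8
//   %b = icmp ult i32 %x, 16
// isImpliedCondition(%a, %b, DL) forms the exact region [0, 8) for %a and
// the allowed region [0, 16) for %b; their difference is empty, so %a being
// true implies %b is true and the query returns true.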