//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

// This optimization is known to cause performance regressions in some cases,
// so keep it under a temporary flag for now.
static cl::opt<bool>
DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
                              cl::Hidden, cl::init(true));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth. Returns 0 if the bitwidth is unknown.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {
// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;
  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc.
  /// Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and ComputeSignBit and
  /// isKnownToBeAPowerOfTwo (all of which can call computeKnownBits), and so
  /// on.
  std::array<const Value *, MaxDepth> Excluded;
  unsigned NumExcluded;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), NumExcluded(0) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};
} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, KnownZero, KnownOne, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  APInt LHSKnownZero(IT->getBitWidth(), 0), LHSKnownOne(IT->getBitWidth(), 0);
  APInt RHSKnownZero(IT->getBitWidth(), 0), RHSKnownOne(IT->getBitWidth(), 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, 0, AC, CxtI, DT);
  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
}

static void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
                           unsigned Depth, const Query &Q);

void llvm::ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
                          const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  ::ComputeSignBit(V, KnownZero, KnownOne, Depth,
                   Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned
                                      Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return NonNegative;
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this so
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  bool NonNegative, Negative;
  ComputeSignBit(V, NonNegative, Negative, DL, Depth, AC, CxtI, DT);
  return Negative;
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   APInt &KnownZero, APInt &KnownOne,
                                   APInt &KnownZero2, APInt &KnownOne2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  computeKnownBits(Op0, LHSKnownZero, LHSKnownOne, Depth + 1, Q);
  computeKnownBits(Op1, KnownZero2, KnownOne2, Depth + 1, Q);

  // Carry in a 1 for a subtract, rather than a 0.
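  //
  // Illustrative sketch of the computation below (hypothetical 4-bit values,
  // not part of the algorithm): adding LHS = ?100 (top bit unknown) to
  // RHS = 0011 gives
  //   PossibleSumOne  = 0100 + 0011 = 0111   (minimum possible sum)
  //   PossibleSumZero = 1100 + 0011 = 1111   (maximum possible sum)
  // The carry into each of the low three bit positions is the same for every
  // choice of the unknown bit, so the result is known to be ?111: the low
  // three bits are 1 and only the top bit remains unknown.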
  uint64_t CarryIn = 0;
  if (!Add) {
    // Sum = LHS + ~RHS + 1
    std::swap(KnownZero2, KnownOne2);
    CarryIn = 1;
  }

  APInt PossibleSumZero = ~LHSKnownZero + ~KnownZero2 + CarryIn;
  APInt PossibleSumOne = LHSKnownOne + KnownOne2 + CarryIn;

  // Compute known bits of the carry.
  APInt CarryKnownZero = ~(PossibleSumZero ^ LHSKnownZero ^ KnownZero2);
  APInt CarryKnownOne = PossibleSumOne ^ LHSKnownOne ^ KnownOne2;

  // Compute set of known bits (where all three relevant bits are known).
  APInt LHSKnown = LHSKnownZero | LHSKnownOne;
  APInt RHSKnown = KnownZero2 | KnownOne2;
  APInt CarryKnown = CarryKnownZero | CarryKnownOne;
  APInt Known = LHSKnown & RHSKnown & CarryKnown;

  assert((PossibleSumZero & Known) == (PossibleSumOne & Known) &&
         "known bits of sum differ");

  // Compute known bits of the result.
  KnownZero = ~PossibleSumOne & Known;
  KnownOne = PossibleSumOne & Known;

  // Are we still trying to solve for the sign bit?
  if (!Known.isNegative()) {
    if (NSW) {
      // Adding two non-negative numbers, or subtracting a negative number from
      // a non-negative one, can't wrap into negative.
      if (LHSKnownZero.isNegative() && KnownZero2.isNegative())
        KnownZero.setSignBit();
      // Adding two negative numbers, or subtracting a non-negative number from
      // a negative one, can't wrap into non-negative.
      else if (LHSKnownOne.isNegative() && KnownOne2.isNegative())
        KnownOne.setSignBit();
    }
  }
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                APInt &KnownZero, APInt &KnownOne,
                                APInt &KnownZero2, APInt &KnownOne2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();
  computeKnownBits(Op1, KnownZero, KnownOne, Depth + 1, Q);
  computeKnownBits(Op0, KnownZero2, KnownOne2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = KnownZero.isNegative();
      bool isKnownNonNegativeOp0 = KnownZero2.isNegative();
      bool isKnownNegativeOp1 = KnownOne.isNegative();
      bool isKnownNegativeOp0 = KnownOne2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
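  //
  // Rough example of the estimate below (hypothetical i32 operands):
  //  * trailing zeros: if Op0 is a multiple of 16 (4 trailing zeros) and Op1
  //    is a multiple of 4 (2 trailing zeros), the product is a multiple of 64,
  //    so at least 4 + 2 = 6 low bits of the result are known zero.
  //  * leading zeros: if Op0 fits in 8 bits (24 leading zeros) and Op1 fits in
  //    4 bits (28 leading zeros), the product fits in 12 bits, giving
  //    max(24 + 28, 32) - 32 = 20 known leading zero bits.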
  KnownOne.clearAllBits();
  unsigned TrailZ = KnownZero.countTrailingOnes() +
                    KnownZero2.countTrailingOnes();
  unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
                            KnownZero2.countLeadingOnes(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  KnownZero.clearAllBits();
  KnownZero.setLowBits(TrailZ);
  KnownZero.setHighBits(LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !KnownOne.isNegative())
    KnownZero.setSignBit();
  else if (isKnownNegative && !KnownZero.isNegative())
    KnownOne.setSignBit();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             APInt &KnownZero,
                                             APInt &KnownOne) {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  KnownZero.setAllBits();
  KnownOne.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    KnownOne &= Range.getUnsignedMax() & Mask;
    KnownZero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (all_of(V->users(), [&](const User *U) { return EphValues.count(U); })) {
      if (V == E)
        return true;

      EphValues.insert(V);
      if (const User *U = dyn_cast<User>(V))
        for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
             J != JE; ++J) {
          if (isSafeToSpeculativelyExecute(*J))
            WorkSet.push_back(*J);
        }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {

  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}

static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
                                       APInt &KnownOne, unsigned Depth,
                                       const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = KnownZero.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).
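    //
    // As a purely illustrative example (hypothetical IR, not taken from a
    // test): given "call void @llvm.assume(i1 %c)" where %c is
    // "icmp eq i32 (and i32 %v, 15), 5", the "assume(v & b = a)" pattern
    // below lets us conclude that the low four bits of %v are 0101, because
    // those mask bits are known one and the RHS constant is fully known.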

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      KnownZero.clearAllBits();
      KnownOne.setAllBits();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      KnownZero.setAllBits();
      KnownOne.clearAllBits();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      KnownZero |= RHSKnownZero;
      KnownOne |= RHSKnownOne;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      KnownZero |= RHSKnownZero & MaskKnownOne;
      KnownOne |= RHSKnownOne & MaskKnownOne;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt MaskKnownZero(BitWidth, 0), MaskKnownOne(BitWidth, 0);
      computeKnownBits(B, MaskKnownZero, MaskKnownOne, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & MaskKnownOne;
      KnownOne |= RHSKnownZero & MaskKnownOne;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      KnownZero |= RHSKnownZero & BKnownZero;
      KnownOne |= RHSKnownOne & BKnownZero;
      KnownZero |= RHSKnownOne & BKnownOne;
      KnownOne |= RHSKnownZero & BKnownOne;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      APInt BKnownZero(BitWidth, 0), BKnownOne(BitWidth, 0);
      computeKnownBits(B, BKnownZero, BKnownOne, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      KnownZero |= RHSKnownOne & BKnownZero;
      KnownOne |= RHSKnownZero & BKnownZero;
      KnownZero |= RHSKnownZero & BKnownOne;
      KnownOne |= RHSKnownOne & BKnownOne;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
      KnownOne |= RHSKnownOne.lshr(C->getZExtValue());
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
      KnownOne |= RHSKnownZero.lshr(C->getZExtValue());
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
                                                m_AShr(m_V, m_ConstantInt(C))),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      KnownZero |= RHSKnownZero << C->getZExtValue();
      KnownOne |= RHSKnownOne << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_CombineOr(
                                             m_LShr(m_V, m_ConstantInt(C)),
                                             m_AShr(m_V, m_ConstantInt(C)))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      KnownZero |= RHSKnownOne << C->getZExtValue();
      KnownOne |= RHSKnownZero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero.setSignBit();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownOne.isAllOnesValue() || RHSKnownZero.isNegative()) {
        // We know that the sign bit is zero.
        KnownZero.setSignBit();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne.setSignBit();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      if (RHSKnownZero.isAllOnesValue() || RHSKnownOne.isNegative()) {
        // We know that the sign bit is one.
        KnownOne.setSignBit();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      KnownZero.setHighBits(RHSKnownZero.countLeadingOnes());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        KnownZero.setHighBits(RHSKnownZero.countLeadingOnes()+1);
      else
        KnownZero.setHighBits(RHSKnownZero.countLeadingOnes());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if ((KnownZero & KnownOne) != 0) {
    KnownZero.clearAllBits();
    KnownOne.clearAllBits();

    if (Q.ORE) {
      auto *CxtI = const_cast<Instruction *>(Q.CxtI);
      OptimizationRemarkAnalysis ORA("value-tracking", "BadAssumption", CxtI);
      Q.ORE->emit(ORA << "Detected conflicting code assumptions. Program may "
                         "have undefined behavior, or compiler may have "
                         "internal error.");
    }
  }
}

// Compute known bits from a shift operator, including those with a
// non-constant shift amount. KnownZero and KnownOne are the outputs of this
// function. KnownZero2 and KnownOne2 are pre-allocated temporaries with the
// same bit width as KnownZero and KnownOne. KZF and KOF are operator-specific
// functors that, given the known-zero or known-one bits respectively, and a
// shift amount, compute the implied known-zero or known-one bits of the shift
// operator's result respectively for that shift amount. The results from
// calling KZF and KOF are conservatively combined for all permitted shift
// amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2,
    APInt &KnownOne2, unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = KnownZero.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KZF(KnownZero, ShiftAmt);
    KnownOne = KOF(KnownOne, ShiftAmt);
    // If there is conflict between KnownZero and KnownOne, this must be an
    // overflowing left shift, so the shift result is undefined. Clear KnownZero
    // and KnownOne bits so that other code could propagate this undef.
    if ((KnownZero & KnownOne) != 0) {
      KnownZero.clearAllBits();
      KnownOne.clearAllBits();
    }

    return;
  }

  computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);

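  // Worked example of the enumeration strategy used further below (assumed
  // i32 shift, purely illustrative): if the shift amount is known to have
  // zeros in every bit position except bits 1 and 2, only the amounts
  // {0, 2, 4, 6} are compatible with its known bits, and the final result is
  // the intersection of the KZF/KOF outputs computed for just those four
  // amounts.
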
  // If the shift amount could be greater than or equal to the bit-width of
  // the LHS, the value could be undef, so we don't know anything about it.
  if ((~KnownZero).uge(BitWidth)) {
    KnownZero.clearAllBits();
    KnownOne.clearAllBits();
    return;
  }

  // Note: We cannot use KnownZero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = KnownZero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = KnownOne.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  KnownZero.clearAllBits();
  KnownOne.clearAllBits();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

  KnownZero = KnownOne = APInt::getAllOnesValue(BitWidth);
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    KnownZero &= KZF(KnownZero2, ShiftAmt);
    KnownOne &= KOF(KnownOne2, ShiftAmt);
  }

  // If there are no compatible shift amounts, then we've proven that the shift
  // amount must be >= the BitWidth, and the result is undefined. We could
  // return anything we'd like, but we need to make sure the sets of known bits
  // stay disjoint (it should be better for some other code to actually
  // propagate the undef than to pick a value here using known bits).
  if ((KnownZero & KnownOne) != 0) {
    KnownZero.clearAllBits();
    KnownOne.clearAllBits();
  }
}

static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
                                         APInt &KnownOne, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = KnownZero.getBitWidth();

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

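    // Small illustration (assumed 4-bit operands, hypothetical values):
    // (?01?) & (?011) = ?01?. Bit 1 is one in both inputs so it is known one
    // in the result; bit 2 is zero in at least one input so it is known zero;
    // bits 0 and 3 stay unknown because they are unknown in one input and not
    // forced to zero by the other.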
    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *Y = nullptr;
    if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                      m_Value(Y))) ||
        match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                      m_Value(Y)))) {
      APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
      computeKnownBits(Y, KnownZero3, KnownOne3, Depth + 1, Q);
      if (KnownOne3.countTrailingOnes() > 0)
        KnownZero.setBit(0);
    }
    break;
  }
  case Instruction::Or: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    break;
  }
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, KnownZero,
                        KnownOne, KnownZero2, KnownOne2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
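    //
    // Hypothetical i32 example of the bound computed below: if the numerator
    // is known to be < 2^24 (at least 8 leading zeros) and bit 4 of the
    // denominator is known set (so the denominator is >= 16), the quotient is
    // < 2^24 / 2^4 = 2^20, i.e. it has at least 12 leading zero bits.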
    computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clearAllBits();
    KnownZero2.clearAllBits();
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);

    const Value *LHS;
    const Value *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, KnownZero, KnownOne, Depth + 1, Q);
      computeKnownBits(LHS, KnownZero2, KnownOne2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), KnownZero, KnownOne, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (KnownOne.isNegative() && KnownOne2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (KnownZero.isNegative() || KnownZero2.isNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (KnownZero.isNegative() && KnownZero2.isNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
      // If either side is negative, the result is negative.
      else if (KnownOne[BitWidth - 1] || KnownOne2[BitWidth - 1])
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(KnownOne.countLeadingOnes(), KnownOne2.countLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(KnownZero.countLeadingOnes(), KnownZero2.countLeadingOnes());
    }

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    if (MaxHighOnes > 0)
      KnownOne.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      KnownZero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    KnownZero = KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne = KnownOne.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KnownZero.zextOrTrunc(BitWidth);
    KnownOne = KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero[SrcBitWidth-1])           // Input sign bit known zero
      KnownZero.setBitsFrom(SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1])       // Input sign bit known set
      KnownOne.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isNegative())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isNegative())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::LShr: {
    // (ushr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.lshr(ShiftAmt) |
             // High bits known zero.
             APInt::getHighBitsSet(BitWidth, ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, KnownZero, KnownOne,
                                      KnownZero2, KnownOne2, Depth, Q, KZF,
                                      KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                           Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           KnownZero, KnownOne, KnownZero2, KnownOne2, Depth,
                           Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1,
                         Q);

        // The low bits of the first operand are unchanged by the srem.
        KnownZero = KnownZero2 & LowBits;
        KnownOne = KnownOne2 & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (KnownZero2.isNegative() || ((KnownZero2 & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (KnownOne2.isNegative() && ((KnownOne2 & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       Q);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setSignBit();
    }

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
        KnownZero |= ~LowBits;
        KnownOne &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
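    // For instance (illustrative only): in a 32-bit urem, if either operand is
    // known to be < 2^20 (at least 12 leading zeros), the remainder is also
    // < 2^20, so the result inherits those 12 leading zero bits.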
    computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), KnownZero2, KnownOne2, Depth + 1, Q);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clearAllBits();
    KnownZero.clearAllBits();
    KnownZero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      KnownZero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    computeKnownBits(I->getOperand(0), LocalKnownZero, LocalKnownOne, Depth + 1,
                     Q);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer.
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnownZero, LocalKnownOne, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, KnownZero2, KnownOne2, Depth + 1, Q);

          // We need to take the minimum number of known bits.
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          computeKnownBits(L, KnownZero3, KnownOne3, Depth + 1, Q);

          KnownZero.setLowBits(std::min(KnownZero2.countTrailingOnes(),
                                        KnownZero3.countTrailingOnes()));

          if (DontImproveNonNegativePhiBits)
            break;

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (KnownZero2.isNegative() && KnownZero3.isNegative())
                KnownZero.setSignBit();
              else if (KnownOne2.isNegative() && KnownOne3.isNegative())
                KnownOne.setSignBit();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (KnownZero2.isNegative() && KnownOne3.isNegative())
                KnownZero.setSignBit();
              else if (KnownOne2.isNegative() && KnownZero3.isNegative())
                KnownOne.setSignBit();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && KnownZero2.isNegative() &&
                     KnownZero3.isNegative())
              KnownZero.setSignBit();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      // Skip if every incoming value references itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      KnownZero.setAllBits();
      KnownOne.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, KnownZero2, KnownOne2, MaxDepth - 1, Q);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
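    //
    // Illustrative example (hypothetical metadata, not taken from a test): a
    // call annotated with !range !{i32 0, i32 256} produces values in
    // [0, 256), so computeKnownBitsFromRangeMetadata marks the top 24 bits of
    // the result as known zero.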
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, KnownZero, KnownOne);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, KnownZero2, KnownOne2, Depth + 1, Q);
      KnownZero |= KnownZero2;
      KnownOne |= KnownOne2;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        KnownZero |= KnownZero2.reverseBits();
        KnownOne |= KnownOne2.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        KnownZero |= KnownZero2.byteSwap();
        KnownOne |= KnownOne2.byteSwap();
        break;
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          LowBits -= 1;
        KnownZero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
        unsigned LeadingZeros =
            APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
        assert(LeadingZeros <= BitWidth);
        KnownZero.setHighBits(LeadingZeros);
        KnownOne &= ~KnownZero;
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero.setBitsFrom(32);
        break;
      }
    }
    break;
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
1459 computeKnownBits(I->getOperand(0), KnownZero, KnownOne, Depth + 1, Q); 1460 break; 1461 case Instruction::ExtractValue: 1462 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) { 1463 const ExtractValueInst *EVI = cast<ExtractValueInst>(I); 1464 if (EVI->getNumIndices() != 1) break; 1465 if (EVI->getIndices()[0] == 0) { 1466 switch (II->getIntrinsicID()) { 1467 default: break; 1468 case Intrinsic::uadd_with_overflow: 1469 case Intrinsic::sadd_with_overflow: 1470 computeKnownBitsAddSub(true, II->getArgOperand(0), 1471 II->getArgOperand(1), false, KnownZero, 1472 KnownOne, KnownZero2, KnownOne2, Depth, Q); 1473 break; 1474 case Intrinsic::usub_with_overflow: 1475 case Intrinsic::ssub_with_overflow: 1476 computeKnownBitsAddSub(false, II->getArgOperand(0), 1477 II->getArgOperand(1), false, KnownZero, 1478 KnownOne, KnownZero2, KnownOne2, Depth, Q); 1479 break; 1480 case Intrinsic::umul_with_overflow: 1481 case Intrinsic::smul_with_overflow: 1482 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false, 1483 KnownZero, KnownOne, KnownZero2, KnownOne2, Depth, 1484 Q); 1485 break; 1486 } 1487 } 1488 } 1489 } 1490 } 1491 1492 /// Determine which bits of V are known to be either zero or one and return 1493 /// them in the KnownZero/KnownOne bit sets. 1494 /// 1495 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that 1496 /// we cannot optimize based on the assumption that it is zero without changing 1497 /// it to be an explicit zero. If we don't change it to zero, other code could 1498 /// optimized based on the contradictory assumption that it is non-zero. 1499 /// Because instcombine aggressively folds operations with undef args anyway, 1500 /// this won't lose us code quality. 1501 /// 1502 /// This function is defined on values with integer type, values with pointer 1503 /// type, and vectors of integers. In the case 1504 /// where V is a vector, known zero, and known one values are the 1505 /// same width as the vector element, and the bit is set only if it is true 1506 /// for all of the elements in the vector. 1507 void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne, 1508 unsigned Depth, const Query &Q) { 1509 assert(V && "No Value?"); 1510 assert(Depth <= MaxDepth && "Limit Search Depth"); 1511 unsigned BitWidth = KnownZero.getBitWidth(); 1512 1513 assert((V->getType()->isIntOrIntVectorTy() || 1514 V->getType()->getScalarType()->isPointerTy()) && 1515 "Not integer or pointer type!"); 1516 assert((Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) && 1517 (!V->getType()->isIntOrIntVectorTy() || 1518 V->getType()->getScalarSizeInBits() == BitWidth) && 1519 KnownZero.getBitWidth() == BitWidth && 1520 KnownOne.getBitWidth() == BitWidth && 1521 "V, KnownOne and KnownZero should have same BitWidth"); 1522 (void)BitWidth; 1523 1524 const APInt *C; 1525 if (match(V, m_APInt(C))) { 1526 // We know all of the bits for a scalar constant or a splat vector constant! 1527 KnownOne = *C; 1528 KnownZero = ~KnownOne; 1529 return; 1530 } 1531 // Null and aggregate-zero are all-zeros. 1532 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) { 1533 KnownOne.clearAllBits(); 1534 KnownZero.setAllBits(); 1535 return; 1536 } 1537 // Handle a constant vector by taking the intersection of the known bits of 1538 // each element. 1539 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) { 1540 // We know that CDS must be a vector of integers. 
Take the intersection of 1541 // each element. 1542 KnownZero.setAllBits(); KnownOne.setAllBits(); 1543 APInt Elt(KnownZero.getBitWidth(), 0); 1544 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { 1545 Elt = CDS->getElementAsInteger(i); 1546 KnownZero &= ~Elt; 1547 KnownOne &= Elt; 1548 } 1549 return; 1550 } 1551 1552 if (const auto *CV = dyn_cast<ConstantVector>(V)) { 1553 // We know that CV must be a vector of integers. Take the intersection of 1554 // each element. 1555 KnownZero.setAllBits(); KnownOne.setAllBits(); 1556 APInt Elt(KnownZero.getBitWidth(), 0); 1557 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { 1558 Constant *Element = CV->getAggregateElement(i); 1559 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); 1560 if (!ElementCI) { 1561 KnownZero.clearAllBits(); 1562 KnownOne.clearAllBits(); 1563 return; 1564 } 1565 Elt = ElementCI->getValue(); 1566 KnownZero &= ~Elt; 1567 KnownOne &= Elt; 1568 } 1569 return; 1570 } 1571 1572 // Start out not knowing anything. 1573 KnownZero.clearAllBits(); KnownOne.clearAllBits(); 1574 1575 // We can't imply anything about undefs. 1576 if (isa<UndefValue>(V)) 1577 return; 1578 1579 // There's no point in looking through other users of ConstantData for 1580 // assumptions. Confirm that we've handled them all. 1581 assert(!isa<ConstantData>(V) && "Unhandled constant data!"); 1582 1583 // Limit search depth. 1584 // All recursive calls that increase depth must come after this. 1585 if (Depth == MaxDepth) 1586 return; 1587 1588 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has 1589 // the bits of its aliasee. 1590 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 1591 if (!GA->isInterposable()) 1592 computeKnownBits(GA->getAliasee(), KnownZero, KnownOne, Depth + 1, Q); 1593 return; 1594 } 1595 1596 if (const Operator *I = dyn_cast<Operator>(V)) 1597 computeKnownBitsFromOperator(I, KnownZero, KnownOne, Depth, Q); 1598 1599 // Aligned pointers have trailing zeros - refine KnownZero set 1600 if (V->getType()->isPointerTy()) { 1601 unsigned Align = V->getPointerAlignment(Q.DL); 1602 if (Align) 1603 KnownZero.setLowBits(countTrailingZeros(Align)); 1604 } 1605 1606 // computeKnownBitsFromAssume strictly refines KnownZero and 1607 // KnownOne. Therefore, we run them after computeKnownBitsFromOperator. 1608 1609 // Check whether a nearby assume intrinsic can determine some known bits. 1610 computeKnownBitsFromAssume(V, KnownZero, KnownOne, Depth, Q); 1611 1612 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); 1613 } 1614 1615 /// Determine whether the sign bit is known to be zero or one. 1616 /// Convenience wrapper around computeKnownBits. 1617 void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne, 1618 unsigned Depth, const Query &Q) { 1619 unsigned BitWidth = getBitWidth(V->getType(), Q.DL); 1620 if (!BitWidth) { 1621 KnownZero = false; 1622 KnownOne = false; 1623 return; 1624 } 1625 APInt ZeroBits(BitWidth, 0); 1626 APInt OneBits(BitWidth, 0); 1627 computeKnownBits(V, ZeroBits, OneBits, Depth, Q); 1628 KnownOne = OneBits.isNegative(); 1629 KnownZero = ZeroBits.isNegative(); 1630 } 1631 1632 /// Return true if the given value is known to have exactly one 1633 /// bit set when defined. For vectors return true if every element is known to 1634 /// be a power of two when defined. Supports values with integer or pointer 1635 /// types and vectors of integers. 
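/// For example, values of the form (1 << X) or (X & -X) are recognized below.
/// A minimal internal query, assuming a Query object Q is already set up:
///   if (isKnownToBeAPowerOfTwo(V, /*OrZero=*/true, /*Depth=*/0, Q))
///     ...  // every element of V is zero or has exactly one bit set.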
1636 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, 1637 const Query &Q) { 1638 if (const Constant *C = dyn_cast<Constant>(V)) { 1639 if (C->isNullValue()) 1640 return OrZero; 1641 1642 const APInt *ConstIntOrConstSplatInt; 1643 if (match(C, m_APInt(ConstIntOrConstSplatInt))) 1644 return ConstIntOrConstSplatInt->isPowerOf2(); 1645 } 1646 1647 // 1 << X is clearly a power of two if the one is not shifted off the end. If 1648 // it is shifted off the end then the result is undefined. 1649 if (match(V, m_Shl(m_One(), m_Value()))) 1650 return true; 1651 1652 // (signbit) >>l X is clearly a power of two if the one is not shifted off the 1653 // bottom. If it is shifted off the bottom then the result is undefined. 1654 if (match(V, m_LShr(m_SignBit(), m_Value()))) 1655 return true; 1656 1657 // The remaining tests are all recursive, so bail out if we hit the limit. 1658 if (Depth++ == MaxDepth) 1659 return false; 1660 1661 Value *X = nullptr, *Y = nullptr; 1662 // A shift left or a logical shift right of a power of two is a power of two 1663 // or zero. 1664 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 1665 match(V, m_LShr(m_Value(X), m_Value())))) 1666 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 1667 1668 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 1669 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 1670 1671 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 1672 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 1673 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 1674 1675 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 1676 // A power of two and'd with anything is a power of two or zero. 1677 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 1678 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 1679 return true; 1680 // X & (-X) is always a power of two or zero. 1681 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 1682 return true; 1683 return false; 1684 } 1685 1686 // Adding a power-of-two or zero to the same power-of-two or zero yields 1687 // either the original power-of-two, a larger power-of-two or zero. 1688 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1689 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 1690 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) { 1691 if (match(X, m_And(m_Specific(Y), m_Value())) || 1692 match(X, m_And(m_Value(), m_Specific(Y)))) 1693 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 1694 return true; 1695 if (match(Y, m_And(m_Specific(X), m_Value())) || 1696 match(Y, m_And(m_Value(), m_Specific(X)))) 1697 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 1698 return true; 1699 1700 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 1701 APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0); 1702 computeKnownBits(X, LHSZeroBits, LHSOneBits, Depth, Q); 1703 1704 APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0); 1705 computeKnownBits(Y, RHSZeroBits, RHSOneBits, Depth, Q); 1706 // If i8 V is a power of two or zero: 1707 // ZeroBits: 1 1 1 0 1 1 1 1 1708 // ~ZeroBits: 0 0 0 1 0 0 0 0 1709 if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2()) 1710 // If OrZero isn't set, we cannot give back a zero result. 1711 // Make sure either the LHS or RHS has a bit set. 
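// Concretely, if both operands can only be 0 or 16 (known-zero bits
// 1 1 1 0 1 1 1 1 for each), then ~(LHSZeroBits & RHSZeroBits) is
// 0 0 0 1 0 0 0 0, a power of two, and X + Y can only be 0, 16 or 32; the
// remaining check rules out the all-zero case unless OrZero allows it.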
1712 if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue()) 1713 return true; 1714 } 1715 } 1716 1717 // An exact divide or right shift can only shift off zero bits, so the result 1718 // is a power of two only if the first operand is a power of two and not 1719 // copying a sign bit (sdiv int_min, 2). 1720 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 1721 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 1722 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 1723 Depth, Q); 1724 } 1725 1726 return false; 1727 } 1728 1729 /// \brief Test whether a GEP's result is known to be non-null. 1730 /// 1731 /// Uses properties inherent in a GEP to try to determine whether it is known 1732 /// to be non-null. 1733 /// 1734 /// Currently this routine does not support vector GEPs. 1735 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, 1736 const Query &Q) { 1737 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0) 1738 return false; 1739 1740 // FIXME: Support vector-GEPs. 1741 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 1742 1743 // If the base pointer is non-null, we cannot walk to a null address with an 1744 // inbounds GEP in address space zero. 1745 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) 1746 return true; 1747 1748 // Walk the GEP operands and see if any operand introduces a non-zero offset. 1749 // If so, then the GEP cannot produce a null pointer, as doing so would 1750 // inherently violate the inbounds contract within address space zero. 1751 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 1752 GTI != GTE; ++GTI) { 1753 // Struct types are easy -- they must always be indexed by a constant. 1754 if (StructType *STy = GTI.getStructTypeOrNull()) { 1755 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); 1756 unsigned ElementIdx = OpC->getZExtValue(); 1757 const StructLayout *SL = Q.DL.getStructLayout(STy); 1758 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); 1759 if (ElementOffset > 0) 1760 return true; 1761 continue; 1762 } 1763 1764 // If we have a zero-sized type, the index doesn't matter. Keep looping. 1765 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0) 1766 continue; 1767 1768 // Fast path the constant operand case both for efficiency and so we don't 1769 // increment Depth when just zipping down an all-constant GEP. 1770 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { 1771 if (!OpC->isZero()) 1772 return true; 1773 continue; 1774 } 1775 1776 // We post-increment Depth here because while isKnownNonZero increments it 1777 // as well, when we pop back up that increment won't persist. We don't want 1778 // to recurse 10k times just because we have 10k GEP operands. We don't 1779 // bail completely out because we want to handle constant GEPs regardless 1780 // of depth. 1781 if (Depth++ >= MaxDepth) 1782 continue; 1783 1784 if (isKnownNonZero(GTI.getOperand(), Depth, Q)) 1785 return true; 1786 } 1787 1788 return false; 1789 } 1790 1791 /// Does the 'Range' metadata (which must be a valid MD_range operand list) 1792 /// ensure that the value it's attached to is never Value? 'RangeType' is 1793 /// is the type of the value described by the range. 
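/// For example, !range metadata of !{i32 1, i32 2048} describes the half-open
/// interval [1, 2048); it excludes 0, so a load or call carrying it is treated
/// as known non-zero by the caller below.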
1794 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) { 1795 const unsigned NumRanges = Ranges->getNumOperands() / 2; 1796 assert(NumRanges >= 1); 1797 for (unsigned i = 0; i < NumRanges; ++i) { 1798 ConstantInt *Lower = 1799 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0)); 1800 ConstantInt *Upper = 1801 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1)); 1802 ConstantRange Range(Lower->getValue(), Upper->getValue()); 1803 if (Range.contains(Value)) 1804 return false; 1805 } 1806 return true; 1807 } 1808 1809 /// Return true if the given value is known to be non-zero when defined. For 1810 /// vectors, return true if every element is known to be non-zero when 1811 /// defined. For pointers, if the context instruction and dominator tree are 1812 /// specified, perform context-sensitive analysis and return true if the 1813 /// pointer couldn't possibly be null at the specified instruction. 1814 /// Supports values with integer or pointer type and vectors of integers. 1815 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) { 1816 if (auto *C = dyn_cast<Constant>(V)) { 1817 if (C->isNullValue()) 1818 return false; 1819 if (isa<ConstantInt>(C)) 1820 // Must be non-zero due to null test above. 1821 return true; 1822 1823 // For constant vectors, check that all elements are undefined or known 1824 // non-zero to determine that the whole vector is known non-zero. 1825 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) { 1826 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { 1827 Constant *Elt = C->getAggregateElement(i); 1828 if (!Elt || Elt->isNullValue()) 1829 return false; 1830 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt)) 1831 return false; 1832 } 1833 return true; 1834 } 1835 1836 return false; 1837 } 1838 1839 if (auto *I = dyn_cast<Instruction>(V)) { 1840 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) { 1841 // If the possible ranges don't contain zero, then the value is 1842 // definitely non-zero. 1843 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { 1844 const APInt ZeroValue(Ty->getBitWidth(), 0); 1845 if (rangeMetadataExcludesValue(Ranges, ZeroValue)) 1846 return true; 1847 } 1848 } 1849 } 1850 1851 // The remaining tests are all recursive, so bail out if we hit the limit. 1852 if (Depth++ >= MaxDepth) 1853 return false; 1854 1855 // Check for pointer simplifications. 1856 if (V->getType()->isPointerTy()) { 1857 if (isKnownNonNullAt(V, Q.CxtI, Q.DT)) 1858 return true; 1859 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 1860 if (isGEPKnownNonNull(GEP, Depth, Q)) 1861 return true; 1862 } 1863 1864 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); 1865 1866 // X | Y != 0 if X != 0 or Y != 0. 1867 Value *X = nullptr, *Y = nullptr; 1868 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 1869 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q); 1870 1871 // ext X != 0 if X != 0. 1872 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 1873 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); 1874 1875 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 1876 // if the lowest bit is shifted off the end. 1877 if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) { 1878 // shl nuw can't remove any non-zero bits. 
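// e.g. (illustrative IR) for %r = shl nuw i32 %x, %n: if %x were non-zero but
// %r were zero, set bits of %x would have been shifted out, violating nuw, so
// proving %x != 0 is sufficient.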
1879 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 1880 if (BO->hasNoUnsignedWrap()) 1881 return isKnownNonZero(X, Depth, Q); 1882 1883 APInt KnownZero(BitWidth, 0); 1884 APInt KnownOne(BitWidth, 0); 1885 computeKnownBits(X, KnownZero, KnownOne, Depth, Q); 1886 if (KnownOne[0]) 1887 return true; 1888 } 1889 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 1890 // defined if the sign bit is shifted off the end. 1891 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 1892 // shr exact can only shift out zero bits. 1893 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 1894 if (BO->isExact()) 1895 return isKnownNonZero(X, Depth, Q); 1896 1897 bool XKnownNonNegative, XKnownNegative; 1898 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q); 1899 if (XKnownNegative) 1900 return true; 1901 1902 // If the shifter operand is a constant, and all of the bits shifted 1903 // out are known to be zero, and X is known non-zero then at least one 1904 // non-zero bit must remain. 1905 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 1906 APInt KnownZero(BitWidth, 0); 1907 APInt KnownOne(BitWidth, 0); 1908 computeKnownBits(X, KnownZero, KnownOne, Depth, Q); 1909 1910 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 1911 // Is there a known one in the portion not shifted out? 1912 if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal) 1913 return true; 1914 // Are all the bits to be shifted out known zero? 1915 if (KnownZero.countTrailingOnes() >= ShiftVal) 1916 return isKnownNonZero(X, Depth, Q); 1917 } 1918 } 1919 // div exact can only produce a zero if the dividend is zero. 1920 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 1921 return isKnownNonZero(X, Depth, Q); 1922 } 1923 // X + Y. 1924 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1925 bool XKnownNonNegative, XKnownNegative; 1926 bool YKnownNonNegative, YKnownNegative; 1927 ComputeSignBit(X, XKnownNonNegative, XKnownNegative, Depth, Q); 1928 ComputeSignBit(Y, YKnownNonNegative, YKnownNegative, Depth, Q); 1929 1930 // If X and Y are both non-negative (as signed values) then their sum is not 1931 // zero unless both X and Y are zero. 1932 if (XKnownNonNegative && YKnownNonNegative) 1933 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q)) 1934 return true; 1935 1936 // If X and Y are both negative (as signed values) then their sum is not 1937 // zero unless both X and Y equal INT_MIN. 1938 if (BitWidth && XKnownNegative && YKnownNegative) { 1939 APInt KnownZero(BitWidth, 0); 1940 APInt KnownOne(BitWidth, 0); 1941 APInt Mask = APInt::getSignedMaxValue(BitWidth); 1942 // The sign bit of X is set. If some other bit is set then X is not equal 1943 // to INT_MIN. 1944 computeKnownBits(X, KnownZero, KnownOne, Depth, Q); 1945 if ((KnownOne & Mask) != 0) 1946 return true; 1947 // The sign bit of Y is set. If some other bit is set then Y is not equal 1948 // to INT_MIN. 1949 computeKnownBits(Y, KnownZero, KnownOne, Depth, Q); 1950 if ((KnownOne & Mask) != 0) 1951 return true; 1952 } 1953 1954 // The sum of a non-negative number and a power of two is not zero. 1955 if (XKnownNonNegative && 1956 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) 1957 return true; 1958 if (YKnownNonNegative && 1959 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) 1960 return true; 1961 } 1962 // X * Y. 
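// With nsw or nuw, a product of two non-zero operands cannot wrap around to
// zero (e.g. i8 mul 16, 16 would give 0 only by wrapping), so checking both
// operands suffices; without a wrap flag nothing can be concluded here.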
1963 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 1964 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 1965 // If X and Y are non-zero then so is X * Y as long as the multiplication 1966 // does not overflow. 1967 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) && 1968 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q)) 1969 return true; 1970 } 1971 // (C ? X : Y) != 0 if X != 0 and Y != 0. 1972 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 1973 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) && 1974 isKnownNonZero(SI->getFalseValue(), Depth, Q)) 1975 return true; 1976 } 1977 // PHI 1978 else if (const PHINode *PN = dyn_cast<PHINode>(V)) { 1979 // Try and detect a recurrence that monotonically increases from a 1980 // starting value, as these are common as induction variables. 1981 if (PN->getNumIncomingValues() == 2) { 1982 Value *Start = PN->getIncomingValue(0); 1983 Value *Induction = PN->getIncomingValue(1); 1984 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start)) 1985 std::swap(Start, Induction); 1986 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) { 1987 if (!C->isZero() && !C->isNegative()) { 1988 ConstantInt *X; 1989 if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) || 1990 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) && 1991 !X->isNegative()) 1992 return true; 1993 } 1994 } 1995 } 1996 // Check if all incoming values are non-zero constant. 1997 bool AllNonZeroConstants = all_of(PN->operands(), [](Value *V) { 1998 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZeroValue(); 1999 }); 2000 if (AllNonZeroConstants) 2001 return true; 2002 } 2003 2004 if (!BitWidth) return false; 2005 APInt KnownZero(BitWidth, 0); 2006 APInt KnownOne(BitWidth, 0); 2007 computeKnownBits(V, KnownZero, KnownOne, Depth, Q); 2008 return KnownOne != 0; 2009 } 2010 2011 /// Return true if V2 == V1 + X, where X is known non-zero. 2012 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { 2013 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 2014 if (!BO || BO->getOpcode() != Instruction::Add) 2015 return false; 2016 Value *Op = nullptr; 2017 if (V2 == BO->getOperand(0)) 2018 Op = BO->getOperand(1); 2019 else if (V2 == BO->getOperand(1)) 2020 Op = BO->getOperand(0); 2021 else 2022 return false; 2023 return isKnownNonZero(Op, 0, Q); 2024 } 2025 2026 /// Return true if it is known that V1 != V2. 2027 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) { 2028 if (V1->getType()->isVectorTy() || V1 == V2) 2029 return false; 2030 if (V1->getType() != V2->getType()) 2031 // We can't look through casts yet. 2032 return false; 2033 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q)) 2034 return true; 2035 2036 if (IntegerType *Ty = dyn_cast<IntegerType>(V1->getType())) { 2037 // Are any known bits in V1 contradictory to known bits in V2? If V1 2038 // has a known zero where V2 has a known one, they must not be equal. 2039 auto BitWidth = Ty->getBitWidth(); 2040 APInt KnownZero1(BitWidth, 0); 2041 APInt KnownOne1(BitWidth, 0); 2042 computeKnownBits(V1, KnownZero1, KnownOne1, 0, Q); 2043 APInt KnownZero2(BitWidth, 0); 2044 APInt KnownOne2(BitWidth, 0); 2045 computeKnownBits(V2, KnownZero2, KnownOne2, 0, Q); 2046 2047 auto OppositeBits = (KnownZero1 & KnownOne2) | (KnownZero2 & KnownOne1); 2048 if (OppositeBits.getBoolValue()) 2049 return true; 2050 } 2051 return false; 2052 } 2053 2054 /// Return true if 'V & Mask' is known to be zero. 
We use this predicate to 2055 /// simplify operations downstream. Mask is known to be zero for bits that V 2056 /// cannot have. 2057 /// 2058 /// This function is defined on values with integer type, values with pointer 2059 /// type, and vectors of integers. In the case 2060 /// where V is a vector, the mask, known zero, and known one values are the 2061 /// same width as the vector element, and the bit is set only if it is true 2062 /// for all of the elements in the vector. 2063 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, 2064 const Query &Q) { 2065 APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0); 2066 computeKnownBits(V, KnownZero, KnownOne, Depth, Q); 2067 return (KnownZero & Mask) == Mask; 2068 } 2069 2070 /// For vector constants, loop over the elements and find the constant with the 2071 /// minimum number of sign bits. Return 0 if the value is not a vector constant 2072 /// or if any element was not analyzed; otherwise, return the count for the 2073 /// element with the minimum number of sign bits. 2074 static unsigned computeNumSignBitsVectorConstant(const Value *V, 2075 unsigned TyBits) { 2076 const auto *CV = dyn_cast<Constant>(V); 2077 if (!CV || !CV->getType()->isVectorTy()) 2078 return 0; 2079 2080 unsigned MinSignBits = TyBits; 2081 unsigned NumElts = CV->getType()->getVectorNumElements(); 2082 for (unsigned i = 0; i != NumElts; ++i) { 2083 // If we find a non-ConstantInt, bail out. 2084 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); 2085 if (!Elt) 2086 return 0; 2087 2088 // If the sign bit is 1, flip the bits, so we always count leading zeros. 2089 APInt EltVal = Elt->getValue(); 2090 if (EltVal.isNegative()) 2091 EltVal = ~EltVal; 2092 MinSignBits = std::min(MinSignBits, EltVal.countLeadingZeros()); 2093 } 2094 2095 return MinSignBits; 2096 } 2097 2098 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, 2099 const Query &Q); 2100 2101 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth, 2102 const Query &Q) { 2103 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q); 2104 assert(Result > 0 && "At least one sign bit needs to be present!"); 2105 return Result; 2106 } 2107 2108 /// Return the number of times the sign bit of the register is replicated into 2109 /// the other bits. We know that at least 1 bit is always equal to the sign bit 2110 /// (itself), but other cases can give us information. For example, immediately 2111 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each 2112 /// other, so we return 3. For vectors, return the number of sign bits for the 2113 /// vector element with the mininum number of known sign bits. 2114 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, 2115 const Query &Q) { 2116 2117 // We return the minimum number of sign bits that are guaranteed to be present 2118 // in V, so for undef we have to conservatively return 1. We don't have the 2119 // same behavior for poison though -- that's a FIXME today. 2120 2121 unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType()); 2122 unsigned Tmp, Tmp2; 2123 unsigned FirstAnswer = 1; 2124 2125 // Note that ConstantInt is handled by the general computeKnownBits case 2126 // below. 2127 2128 if (Depth == MaxDepth) 2129 return 1; // Limit search depth. 
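// A worked example: for %r = ashr i16 %y, 12, the operand contributes at least
// one sign bit, so the AShr case below reports at least 13 -- i.e. the top 13
// bits of %r all equal its sign bit and %r lies in [-8, 7].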
2130 2131 const Operator *U = dyn_cast<Operator>(V); 2132 switch (Operator::getOpcode(V)) { 2133 default: break; 2134 case Instruction::SExt: 2135 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); 2136 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp; 2137 2138 case Instruction::SDiv: { 2139 const APInt *Denominator; 2140 // sdiv X, C -> adds log(C) sign bits. 2141 if (match(U->getOperand(1), m_APInt(Denominator))) { 2142 2143 // Ignore non-positive denominator. 2144 if (!Denominator->isStrictlyPositive()) 2145 break; 2146 2147 // Calculate the incoming numerator bits. 2148 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2149 2150 // Add floor(log(C)) bits to the numerator bits. 2151 return std::min(TyBits, NumBits + Denominator->logBase2()); 2152 } 2153 break; 2154 } 2155 2156 case Instruction::SRem: { 2157 const APInt *Denominator; 2158 // srem X, C -> we know that the result is within [-C+1,C) when C is a 2159 // positive constant. This let us put a lower bound on the number of sign 2160 // bits. 2161 if (match(U->getOperand(1), m_APInt(Denominator))) { 2162 2163 // Ignore non-positive denominator. 2164 if (!Denominator->isStrictlyPositive()) 2165 break; 2166 2167 // Calculate the incoming numerator bits. SRem by a positive constant 2168 // can't lower the number of sign bits. 2169 unsigned NumrBits = 2170 ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2171 2172 // Calculate the leading sign bit constraints by examining the 2173 // denominator. Given that the denominator is positive, there are two 2174 // cases: 2175 // 2176 // 1. the numerator is positive. The result range is [0,C) and [0,C) u< 2177 // (1 << ceilLogBase2(C)). 2178 // 2179 // 2. the numerator is negative. Then the result range is (-C,0] and 2180 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). 2181 // 2182 // Thus a lower bound on the number of sign bits is `TyBits - 2183 // ceilLogBase2(C)`. 2184 2185 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); 2186 return std::max(NumrBits, ResBits); 2187 } 2188 break; 2189 } 2190 2191 case Instruction::AShr: { 2192 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2193 // ashr X, C -> adds C sign bits. Vectors too. 2194 const APInt *ShAmt; 2195 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2196 unsigned ShAmtLimited = ShAmt->getZExtValue(); 2197 if (ShAmtLimited >= TyBits) 2198 break; // Bad shift. 2199 Tmp += ShAmtLimited; 2200 if (Tmp > TyBits) Tmp = TyBits; 2201 } 2202 return Tmp; 2203 } 2204 case Instruction::Shl: { 2205 const APInt *ShAmt; 2206 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2207 // shl destroys sign bits. 2208 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2209 Tmp2 = ShAmt->getZExtValue(); 2210 if (Tmp2 >= TyBits || // Bad shift. 2211 Tmp2 >= Tmp) break; // Shifted all sign bits out. 2212 return Tmp - Tmp2; 2213 } 2214 break; 2215 } 2216 case Instruction::And: 2217 case Instruction::Or: 2218 case Instruction::Xor: // NOT is handled here. 2219 // Logical binary ops preserve the number of sign bits at the worst. 2220 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2221 if (Tmp != 1) { 2222 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2223 FirstAnswer = std::min(Tmp, Tmp2); 2224 // We computed what we know about the sign bits as our first 2225 // answer. Now proceed to the generic code that uses 2226 // computeKnownBits, and pick whichever answer is better. 
2227 } 2228 break; 2229 2230 case Instruction::Select: 2231 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2232 if (Tmp == 1) return 1; // Early out. 2233 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 2234 return std::min(Tmp, Tmp2); 2235 2236 case Instruction::Add: 2237 // Add can have at most one carry bit. Thus we know that the output 2238 // is, at worst, one more bit than the inputs. 2239 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2240 if (Tmp == 1) return 1; // Early out. 2241 2242 // Special case decrementing a value (ADD X, -1): 2243 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 2244 if (CRHS->isAllOnesValue()) { 2245 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 2246 computeKnownBits(U->getOperand(0), KnownZero, KnownOne, Depth + 1, Q); 2247 2248 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2249 // sign bits set. 2250 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue()) 2251 return TyBits; 2252 2253 // If we are subtracting one from a positive number, there is no carry 2254 // out of the result. 2255 if (KnownZero.isNegative()) 2256 return Tmp; 2257 } 2258 2259 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2260 if (Tmp2 == 1) return 1; 2261 return std::min(Tmp, Tmp2)-1; 2262 2263 case Instruction::Sub: 2264 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2265 if (Tmp2 == 1) return 1; 2266 2267 // Handle NEG. 2268 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2269 if (CLHS->isNullValue()) { 2270 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 2271 computeKnownBits(U->getOperand(1), KnownZero, KnownOne, Depth + 1, Q); 2272 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2273 // sign bits set. 2274 if ((KnownZero | APInt(TyBits, 1)).isAllOnesValue()) 2275 return TyBits; 2276 2277 // If the input is known to be positive (the sign bit is known clear), 2278 // the output of the NEG has the same number of sign bits as the input. 2279 if (KnownZero.isNegative()) 2280 return Tmp2; 2281 2282 // Otherwise, we treat this like a SUB. 2283 } 2284 2285 // Sub can have at most one carry bit. Thus we know that the output 2286 // is, at worst, one more bit than the inputs. 2287 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2288 if (Tmp == 1) return 1; // Early out. 2289 return std::min(Tmp, Tmp2)-1; 2290 2291 case Instruction::PHI: { 2292 const PHINode *PN = cast<PHINode>(U); 2293 unsigned NumIncomingValues = PN->getNumIncomingValues(); 2294 // Don't analyze large in-degree PHIs. 2295 if (NumIncomingValues > 4) break; 2296 // Unreachable blocks may have zero-operand PHI nodes. 2297 if (NumIncomingValues == 0) break; 2298 2299 // Take the minimum of all incoming values. This can't infinitely loop 2300 // because of our depth threshold. 2301 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q); 2302 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { 2303 if (Tmp == 1) return Tmp; 2304 Tmp = std::min( 2305 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q)); 2306 } 2307 return Tmp; 2308 } 2309 2310 case Instruction::Trunc: 2311 // FIXME: it's tricky to do anything useful for this, but it is an important 2312 // case for targets like X86. 2313 break; 2314 2315 case Instruction::ExtractElement: 2316 // Look through extract element. At the moment we keep this simple and skip 2317 // tracking the specific element. 
But at least we might find information 2318 // valid for all elements of the vector (for example if vector is sign 2319 // extended, shifted, etc). 2320 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2321 } 2322 2323 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2324 // use this information. 2325 2326 // If we can examine all elements of a vector constant successfully, we're 2327 // done (we can't do any better than that). If not, keep trying. 2328 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits)) 2329 return VecSignBits; 2330 2331 APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0); 2332 computeKnownBits(V, KnownZero, KnownOne, Depth, Q); 2333 2334 // If we know that the sign bit is either zero or one, determine the number of 2335 // identical bits in the top of the input value. 2336 if (KnownZero.isNegative()) 2337 return std::max(FirstAnswer, KnownZero.countLeadingOnes()); 2338 2339 if (KnownOne.isNegative()) 2340 return std::max(FirstAnswer, KnownOne.countLeadingOnes()); 2341 2342 // computeKnownBits gave us no extra information about the top bits. 2343 return FirstAnswer; 2344 } 2345 2346 /// This function computes the integer multiple of Base that equals V. 2347 /// If successful, it returns true and returns the multiple in 2348 /// Multiple. If unsuccessful, it returns false. It looks 2349 /// through SExt instructions only if LookThroughSExt is true. 2350 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 2351 bool LookThroughSExt, unsigned Depth) { 2352 const unsigned MaxDepth = 6; 2353 2354 assert(V && "No Value?"); 2355 assert(Depth <= MaxDepth && "Limit Search Depth"); 2356 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); 2357 2358 Type *T = V->getType(); 2359 2360 ConstantInt *CI = dyn_cast<ConstantInt>(V); 2361 2362 if (Base == 0) 2363 return false; 2364 2365 if (Base == 1) { 2366 Multiple = V; 2367 return true; 2368 } 2369 2370 ConstantExpr *CO = dyn_cast<ConstantExpr>(V); 2371 Constant *BaseVal = ConstantInt::get(T, Base); 2372 if (CO && CO == BaseVal) { 2373 // Multiple is 1. 2374 Multiple = ConstantInt::get(T, 1); 2375 return true; 2376 } 2377 2378 if (CI && CI->getZExtValue() % Base == 0) { 2379 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 2380 return true; 2381 } 2382 2383 if (Depth == MaxDepth) return false; // Limit search depth. 
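// For example, with Base == 8 and V defined (illustrative IR) as
//   %v = shl i32 %x, 3
// the Shl case below rewrites the shift as %x * 8; the recursive query on the
// constant 8 yields a multiple of 1, so the routine returns true with Multiple
// set to %x.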
2384 2385 Operator *I = dyn_cast<Operator>(V); 2386 if (!I) return false; 2387 2388 switch (I->getOpcode()) { 2389 default: break; 2390 case Instruction::SExt: 2391 if (!LookThroughSExt) return false; 2392 // otherwise fall through to ZExt 2393 case Instruction::ZExt: 2394 return ComputeMultiple(I->getOperand(0), Base, Multiple, 2395 LookThroughSExt, Depth+1); 2396 case Instruction::Shl: 2397 case Instruction::Mul: { 2398 Value *Op0 = I->getOperand(0); 2399 Value *Op1 = I->getOperand(1); 2400 2401 if (I->getOpcode() == Instruction::Shl) { 2402 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 2403 if (!Op1CI) return false; 2404 // Turn Op0 << Op1 into Op0 * 2^Op1 2405 APInt Op1Int = Op1CI->getValue(); 2406 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 2407 APInt API(Op1Int.getBitWidth(), 0); 2408 API.setBit(BitToSet); 2409 Op1 = ConstantInt::get(V->getContext(), API); 2410 } 2411 2412 Value *Mul0 = nullptr; 2413 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 2414 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 2415 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 2416 if (Op1C->getType()->getPrimitiveSizeInBits() < 2417 MulC->getType()->getPrimitiveSizeInBits()) 2418 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 2419 if (Op1C->getType()->getPrimitiveSizeInBits() > 2420 MulC->getType()->getPrimitiveSizeInBits()) 2421 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 2422 2423 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 2424 Multiple = ConstantExpr::getMul(MulC, Op1C); 2425 return true; 2426 } 2427 2428 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 2429 if (Mul0CI->getValue() == 1) { 2430 // V == Base * Op1, so return Op1 2431 Multiple = Op1; 2432 return true; 2433 } 2434 } 2435 2436 Value *Mul1 = nullptr; 2437 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 2438 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 2439 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 2440 if (Op0C->getType()->getPrimitiveSizeInBits() < 2441 MulC->getType()->getPrimitiveSizeInBits()) 2442 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 2443 if (Op0C->getType()->getPrimitiveSizeInBits() > 2444 MulC->getType()->getPrimitiveSizeInBits()) 2445 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 2446 2447 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 2448 Multiple = ConstantExpr::getMul(MulC, Op0C); 2449 return true; 2450 } 2451 2452 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) 2453 if (Mul1CI->getValue() == 1) { 2454 // V == Base * Op0, so return Op0 2455 Multiple = Op0; 2456 return true; 2457 } 2458 } 2459 } 2460 } 2461 2462 // We could not determine if V is a multiple of Base. 2463 return false; 2464 } 2465 2466 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS, 2467 const TargetLibraryInfo *TLI) { 2468 const Function *F = ICS.getCalledFunction(); 2469 if (!F) 2470 return Intrinsic::not_intrinsic; 2471 2472 if (F->isIntrinsic()) 2473 return F->getIntrinsicID(); 2474 2475 if (!TLI) 2476 return Intrinsic::not_intrinsic; 2477 2478 LibFunc Func; 2479 // We're going to make assumptions on the semantics of the functions, check 2480 // that the target knows that it's available in this environment and it does 2481 // not have local linkage. 
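// For example, a read-only call to the C library function sinf(), with TLI
// confirming its availability, is mapped below to Intrinsic::sin; a function
// TLI does not recognize, or a call that may write memory, stays
// Intrinsic::not_intrinsic.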
2482 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func)) 2483 return Intrinsic::not_intrinsic; 2484 2485 if (!ICS.onlyReadsMemory()) 2486 return Intrinsic::not_intrinsic; 2487 2488 // Otherwise check if we have a call to a function that can be turned into a 2489 // vector intrinsic. 2490 switch (Func) { 2491 default: 2492 break; 2493 case LibFunc_sin: 2494 case LibFunc_sinf: 2495 case LibFunc_sinl: 2496 return Intrinsic::sin; 2497 case LibFunc_cos: 2498 case LibFunc_cosf: 2499 case LibFunc_cosl: 2500 return Intrinsic::cos; 2501 case LibFunc_exp: 2502 case LibFunc_expf: 2503 case LibFunc_expl: 2504 return Intrinsic::exp; 2505 case LibFunc_exp2: 2506 case LibFunc_exp2f: 2507 case LibFunc_exp2l: 2508 return Intrinsic::exp2; 2509 case LibFunc_log: 2510 case LibFunc_logf: 2511 case LibFunc_logl: 2512 return Intrinsic::log; 2513 case LibFunc_log10: 2514 case LibFunc_log10f: 2515 case LibFunc_log10l: 2516 return Intrinsic::log10; 2517 case LibFunc_log2: 2518 case LibFunc_log2f: 2519 case LibFunc_log2l: 2520 return Intrinsic::log2; 2521 case LibFunc_fabs: 2522 case LibFunc_fabsf: 2523 case LibFunc_fabsl: 2524 return Intrinsic::fabs; 2525 case LibFunc_fmin: 2526 case LibFunc_fminf: 2527 case LibFunc_fminl: 2528 return Intrinsic::minnum; 2529 case LibFunc_fmax: 2530 case LibFunc_fmaxf: 2531 case LibFunc_fmaxl: 2532 return Intrinsic::maxnum; 2533 case LibFunc_copysign: 2534 case LibFunc_copysignf: 2535 case LibFunc_copysignl: 2536 return Intrinsic::copysign; 2537 case LibFunc_floor: 2538 case LibFunc_floorf: 2539 case LibFunc_floorl: 2540 return Intrinsic::floor; 2541 case LibFunc_ceil: 2542 case LibFunc_ceilf: 2543 case LibFunc_ceill: 2544 return Intrinsic::ceil; 2545 case LibFunc_trunc: 2546 case LibFunc_truncf: 2547 case LibFunc_truncl: 2548 return Intrinsic::trunc; 2549 case LibFunc_rint: 2550 case LibFunc_rintf: 2551 case LibFunc_rintl: 2552 return Intrinsic::rint; 2553 case LibFunc_nearbyint: 2554 case LibFunc_nearbyintf: 2555 case LibFunc_nearbyintl: 2556 return Intrinsic::nearbyint; 2557 case LibFunc_round: 2558 case LibFunc_roundf: 2559 case LibFunc_roundl: 2560 return Intrinsic::round; 2561 case LibFunc_pow: 2562 case LibFunc_powf: 2563 case LibFunc_powl: 2564 return Intrinsic::pow; 2565 case LibFunc_sqrt: 2566 case LibFunc_sqrtf: 2567 case LibFunc_sqrtl: 2568 if (ICS->hasNoNaNs()) 2569 return Intrinsic::sqrt; 2570 return Intrinsic::not_intrinsic; 2571 } 2572 2573 return Intrinsic::not_intrinsic; 2574 } 2575 2576 /// Return true if we can prove that the specified FP value is never equal to 2577 /// -0.0. 2578 /// 2579 /// NOTE: this function will need to be revisited when we support non-default 2580 /// rounding modes! 2581 /// 2582 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, 2583 unsigned Depth) { 2584 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) 2585 return !CFP->getValueAPF().isNegZero(); 2586 2587 if (Depth == MaxDepth) 2588 return false; // Limit search depth. 2589 2590 const Operator *I = dyn_cast<Operator>(V); 2591 if (!I) return false; 2592 2593 // Check if the nsz fast-math flag is set 2594 if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I)) 2595 if (FPO->hasNoSignedZeros()) 2596 return true; 2597 2598 // (add x, 0.0) is guaranteed to return +0.0, not -0.0. 2599 if (I->getOpcode() == Instruction::FAdd) 2600 if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1))) 2601 if (CFP->isNullValue()) 2602 return true; 2603 2604 // sitofp and uitofp turn into +0.0 for zero. 
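// (e.g. %f = sitofp i32 %n to float is +0.0 when %n is 0 and has nonzero
// magnitude otherwise, so it can never be -0.0.)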
2605 if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I)) 2606 return true; 2607 2608 if (const CallInst *CI = dyn_cast<CallInst>(I)) { 2609 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI); 2610 switch (IID) { 2611 default: 2612 break; 2613 // sqrt(-0.0) = -0.0, no other negative results are possible. 2614 case Intrinsic::sqrt: 2615 return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1); 2616 // fabs(x) != -0.0 2617 case Intrinsic::fabs: 2618 return true; 2619 } 2620 } 2621 2622 return false; 2623 } 2624 2625 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a 2626 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign 2627 /// bit despite comparing equal. 2628 static bool cannotBeOrderedLessThanZeroImpl(const Value *V, 2629 const TargetLibraryInfo *TLI, 2630 bool SignBitOnly, 2631 unsigned Depth) { 2632 // TODO: This function does not do the right thing when SignBitOnly is true 2633 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform 2634 // which flips the sign bits of NaNs. See 2635 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2636 2637 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 2638 return !CFP->getValueAPF().isNegative() || 2639 (!SignBitOnly && CFP->getValueAPF().isZero()); 2640 } 2641 2642 if (Depth == MaxDepth) 2643 return false; // Limit search depth. 2644 2645 const Operator *I = dyn_cast<Operator>(V); 2646 if (!I) 2647 return false; 2648 2649 switch (I->getOpcode()) { 2650 default: 2651 break; 2652 // Unsigned integers are always nonnegative. 2653 case Instruction::UIToFP: 2654 return true; 2655 case Instruction::FMul: 2656 // x*x is always non-negative or a NaN. 2657 if (I->getOperand(0) == I->getOperand(1) && 2658 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 2659 return true; 2660 2661 LLVM_FALLTHROUGH; 2662 case Instruction::FAdd: 2663 case Instruction::FDiv: 2664 case Instruction::FRem: 2665 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2666 Depth + 1) && 2667 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2668 Depth + 1); 2669 case Instruction::Select: 2670 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2671 Depth + 1) && 2672 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2673 Depth + 1); 2674 case Instruction::FPExt: 2675 case Instruction::FPTrunc: 2676 // Widening/narrowing never change sign. 2677 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2678 Depth + 1); 2679 case Instruction::Call: 2680 const auto *CI = cast<CallInst>(I); 2681 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI); 2682 switch (IID) { 2683 default: 2684 break; 2685 case Intrinsic::maxnum: 2686 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2687 Depth + 1) || 2688 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2689 Depth + 1); 2690 case Intrinsic::minnum: 2691 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2692 Depth + 1) && 2693 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2694 Depth + 1); 2695 case Intrinsic::exp: 2696 case Intrinsic::exp2: 2697 case Intrinsic::fabs: 2698 return true; 2699 2700 case Intrinsic::sqrt: 2701 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 
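// e.g. sqrt(4.0) is 2.0 and sqrt(-1.0) is NaN (unordered, hence not "ordered
// less than zero"), but sqrt(-0.0) is -0.0 and a NaN result may have its sign
// bit set, so the SignBitOnly case additionally requires nnan plus either nsz
// or an operand proven free of -0.0.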
2702 if (!SignBitOnly) 2703 return true; 2704 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 2705 CannotBeNegativeZero(CI->getOperand(0), TLI)); 2706 2707 case Intrinsic::powi: 2708 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 2709 // powi(x,n) is non-negative if n is even. 2710 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 2711 return true; 2712 } 2713 // TODO: This is not correct. Given that exp is an integer, here are the 2714 // ways that pow can return a negative value: 2715 // 2716 // pow(x, exp) --> negative if exp is odd and x is negative. 2717 // pow(-0, exp) --> -inf if exp is negative odd. 2718 // pow(-0, exp) --> -0 if exp is positive odd. 2719 // pow(-inf, exp) --> -0 if exp is negative odd. 2720 // pow(-inf, exp) --> -inf if exp is positive odd. 2721 // 2722 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 2723 // but we must return false if x == -0. Unfortunately we do not currently 2724 // have a way of expressing this constraint. See details in 2725 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2726 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2727 Depth + 1); 2728 2729 case Intrinsic::fma: 2730 case Intrinsic::fmuladd: 2731 // x*x+y is non-negative if y is non-negative. 2732 return I->getOperand(0) == I->getOperand(1) && 2733 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && 2734 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2735 Depth + 1); 2736 } 2737 break; 2738 } 2739 return false; 2740 } 2741 2742 bool llvm::CannotBeOrderedLessThanZero(const Value *V, 2743 const TargetLibraryInfo *TLI) { 2744 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); 2745 } 2746 2747 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { 2748 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); 2749 } 2750 2751 /// If the specified value can be set by repeating the same byte in memory, 2752 /// return the i8 value that it is represented with. This is 2753 /// true for all i8 values obviously, but is also true for i32 0, i32 -1, 2754 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated 2755 /// byte store (e.g. i16 0x1234), return null. 2756 Value *llvm::isBytewiseValue(Value *V) { 2757 // All byte-wide stores are splatable, even of arbitrary variables. 2758 if (V->getType()->isIntegerTy(8)) return V; 2759 2760 // Handle 'null' ConstantArrayZero etc. 2761 if (Constant *C = dyn_cast<Constant>(V)) 2762 if (C->isNullValue()) 2763 return Constant::getNullValue(Type::getInt8Ty(V->getContext())); 2764 2765 // Constant float and double values can be handled as integer values if the 2766 // corresponding integer value is "byteable". An important case is 0.0. 2767 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 2768 if (CFP->getType()->isFloatTy()) 2769 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext())); 2770 if (CFP->getType()->isDoubleTy()) 2771 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext())); 2772 // Don't handle long double formats, which have strange constraints. 2773 } 2774 2775 // We can handle constant integers that are multiple of 8 bits. 
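// For example, i32 0xAAAAAAAA splats to the i8 value 0xAA, while i16 0x1234
// fails the isSplat(8) check below and yields null.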
2776 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 2777 if (CI->getBitWidth() % 8 == 0) { 2778 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); 2779 2780 if (!CI->getValue().isSplat(8)) 2781 return nullptr; 2782 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8)); 2783 } 2784 } 2785 2786 // A ConstantDataArray/Vector is splatable if all its members are equal and 2787 // also splatable. 2788 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) { 2789 Value *Elt = CA->getElementAsConstant(0); 2790 Value *Val = isBytewiseValue(Elt); 2791 if (!Val) 2792 return nullptr; 2793 2794 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I) 2795 if (CA->getElementAsConstant(I) != Elt) 2796 return nullptr; 2797 2798 return Val; 2799 } 2800 2801 // Conceptually, we could handle things like: 2802 // %a = zext i8 %X to i16 2803 // %b = shl i16 %a, 8 2804 // %c = or i16 %a, %b 2805 // but until there is an example that actually needs this, it doesn't seem 2806 // worth worrying about. 2807 return nullptr; 2808 } 2809 2810 2811 // This is the recursive version of BuildSubAggregate. It takes a few different 2812 // arguments. Idxs is the index within the nested struct From that we are 2813 // looking at now (which is of type IndexedType). IdxSkip is the number of 2814 // indices from Idxs that should be left out when inserting into the resulting 2815 // struct. To is the result struct built so far, new insertvalue instructions 2816 // build on that. 2817 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, 2818 SmallVectorImpl<unsigned> &Idxs, 2819 unsigned IdxSkip, 2820 Instruction *InsertBefore) { 2821 llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType); 2822 if (STy) { 2823 // Save the original To argument so we can modify it 2824 Value *OrigTo = To; 2825 // General case, the type indexed by Idxs is a struct 2826 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2827 // Process each struct element recursively 2828 Idxs.push_back(i); 2829 Value *PrevTo = To; 2830 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, 2831 InsertBefore); 2832 Idxs.pop_back(); 2833 if (!To) { 2834 // Couldn't find any inserted value for this index? Cleanup 2835 while (PrevTo != OrigTo) { 2836 InsertValueInst* Del = cast<InsertValueInst>(PrevTo); 2837 PrevTo = Del->getAggregateOperand(); 2838 Del->eraseFromParent(); 2839 } 2840 // Stop processing elements 2841 break; 2842 } 2843 } 2844 // If we successfully found a value for each of our subaggregates 2845 if (To) 2846 return To; 2847 } 2848 // Base case, the type indexed by SourceIdxs is not a struct, or not all of 2849 // the struct's elements had a value that was inserted directly. In the latter 2850 // case, perhaps we can't determine each of the subelements individually, but 2851 // we might be able to find the complete struct somewhere. 2852 2853 // Find the value that is at that particular spot 2854 Value *V = FindInsertedValue(From, Idxs); 2855 2856 if (!V) 2857 return nullptr; 2858 2859 // Insert the value in the new (sub) aggregrate 2860 return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), 2861 "tmp", InsertBefore); 2862 } 2863 2864 // This helper takes a nested struct and extracts a part of it (which is again a 2865 // struct) into a new value. For example, given the struct: 2866 // { a, { b, { c, d }, e } } 2867 // and the indices "1, 1" this returns 2868 // { c, d }. 
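// In IR terms (illustrative, names are arbitrary), the result is materialized
// in front of InsertBefore roughly as:
//   %agg0 = insertvalue { i32, i32 } undef, i32 %c, 0
//   %agg1 = insertvalue { i32, i32 } %agg0, i32 %d, 1
// provided %c and %d were themselves put into From by insertvalue
// instructions that FindInsertedValue can locate.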
2869 // 2870 // It does this by inserting an insertvalue for each element in the resulting 2871 // struct, as opposed to just inserting a single struct. This will only work if 2872 // each of the elements of the substruct are known (ie, inserted into From by an 2873 // insertvalue instruction somewhere). 2874 // 2875 // All inserted insertvalue instructions are inserted before InsertBefore 2876 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, 2877 Instruction *InsertBefore) { 2878 assert(InsertBefore && "Must have someplace to insert!"); 2879 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), 2880 idx_range); 2881 Value *To = UndefValue::get(IndexedType); 2882 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); 2883 unsigned IdxSkip = Idxs.size(); 2884 2885 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); 2886 } 2887 2888 /// Given an aggregrate and an sequence of indices, see if 2889 /// the scalar value indexed is already around as a register, for example if it 2890 /// were inserted directly into the aggregrate. 2891 /// 2892 /// If InsertBefore is not null, this function will duplicate (modified) 2893 /// insertvalues when a part of a nested struct is extracted. 2894 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, 2895 Instruction *InsertBefore) { 2896 // Nothing to index? Just return V then (this is useful at the end of our 2897 // recursion). 2898 if (idx_range.empty()) 2899 return V; 2900 // We have indices, so V should have an indexable type. 2901 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && 2902 "Not looking at a struct or array?"); 2903 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && 2904 "Invalid indices for type?"); 2905 2906 if (Constant *C = dyn_cast<Constant>(V)) { 2907 C = C->getAggregateElement(idx_range[0]); 2908 if (!C) return nullptr; 2909 return FindInsertedValue(C, idx_range.slice(1), InsertBefore); 2910 } 2911 2912 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { 2913 // Loop the indices for the insertvalue instruction in parallel with the 2914 // requested indices 2915 const unsigned *req_idx = idx_range.begin(); 2916 for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); 2917 i != e; ++i, ++req_idx) { 2918 if (req_idx == idx_range.end()) { 2919 // We can't handle this without inserting insertvalues 2920 if (!InsertBefore) 2921 return nullptr; 2922 2923 // The requested index identifies a part of a nested aggregate. Handle 2924 // this specially. For example, 2925 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0 2926 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1 2927 // %C = extractvalue {i32, { i32, i32 } } %B, 1 2928 // This can be changed into 2929 // %A = insertvalue {i32, i32 } undef, i32 10, 0 2930 // %C = insertvalue {i32, i32 } %A, i32 11, 1 2931 // which allows the unused 0,0 element from the nested struct to be 2932 // removed. 2933 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx), 2934 InsertBefore); 2935 } 2936 2937 // This insert value inserts something else than what we are looking for. 2938 // See if the (aggregate) value inserted into has the value we are 2939 // looking for, then. 2940 if (*req_idx != *i) 2941 return FindInsertedValue(I->getAggregateOperand(), idx_range, 2942 InsertBefore); 2943 } 2944 // If we end up here, the indices of the insertvalue match with those 2945 // requested (though possibly only partially). 
Now we recursively look at 2946 // the inserted value, passing any remaining indices. 2947 return FindInsertedValue(I->getInsertedValueOperand(), 2948 makeArrayRef(req_idx, idx_range.end()), 2949 InsertBefore); 2950 } 2951 2952 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { 2953 // If we're extracting a value from an aggregate that was extracted from 2954 // something else, we can extract from that something else directly instead. 2955 // However, we will need to chain I's indices with the requested indices. 2956 2957 // Calculate the number of indices required 2958 unsigned size = I->getNumIndices() + idx_range.size(); 2959 // Allocate some space to put the new indices in 2960 SmallVector<unsigned, 5> Idxs; 2961 Idxs.reserve(size); 2962 // Add indices from the extract value instruction 2963 Idxs.append(I->idx_begin(), I->idx_end()); 2964 2965 // Add requested indices 2966 Idxs.append(idx_range.begin(), idx_range.end()); 2967 2968 assert(Idxs.size() == size 2969 && "Number of indices added not correct?"); 2970 2971 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); 2972 } 2973 // Otherwise, we don't know (such as, extracting from a function return value 2974 // or load instruction) 2975 return nullptr; 2976 } 2977 2978 /// Analyze the specified pointer to see if it can be expressed as a base 2979 /// pointer plus a constant offset. Return the base and offset to the caller. 2980 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, 2981 const DataLayout &DL) { 2982 unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType()); 2983 APInt ByteOffset(BitWidth, 0); 2984 2985 // We walk up the defs but use a visited set to handle unreachable code. In 2986 // that case, we stop after accumulating the cycle once (not that it 2987 // matters). 2988 SmallPtrSet<Value *, 16> Visited; 2989 while (Visited.insert(Ptr).second) { 2990 if (Ptr->getType()->isVectorTy()) 2991 break; 2992 2993 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) { 2994 // If one of the values we have visited is an addrspacecast, then 2995 // the pointer type of this GEP may be different from the type 2996 // of the Ptr parameter which was passed to this function. This 2997 // means when we construct GEPOffset, we need to use the size 2998 // of GEP's pointer type rather than the size of the original 2999 // pointer type. 3000 APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0); 3001 if (!GEP->accumulateConstantOffset(DL, GEPOffset)) 3002 break; 3003 3004 ByteOffset += GEPOffset.getSExtValue(); 3005 3006 Ptr = GEP->getPointerOperand(); 3007 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast || 3008 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) { 3009 Ptr = cast<Operator>(Ptr)->getOperand(0); 3010 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) { 3011 if (GA->isInterposable()) 3012 break; 3013 Ptr = GA->getAliasee(); 3014 } else { 3015 break; 3016 } 3017 } 3018 Offset = ByteOffset.getSExtValue(); 3019 return Ptr; 3020 } 3021 3022 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP) { 3023 // Make sure the GEP has exactly three arguments. 3024 if (GEP->getNumOperands() != 3) 3025 return false; 3026 3027 // Make sure the index-ee is a pointer to array of i8. 
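  // For example (illustrative IR, made-up names, typed-pointer syntax): a GEP
  // such as
  //   getelementptr inbounds [6 x i8], [6 x i8]* @str, i64 0, i64 %idx
  // has three operands, a [6 x i8] source element type, and a constant-zero
  // first index, so it is accepted; a GEP whose source element type is not an
  // array of i8 is rejected below.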
3028 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType()); 3029 if (!AT || !AT->getElementType()->isIntegerTy(8)) 3030 return false; 3031 3032 // Check to make sure that the first operand of the GEP is an integer and 3033 // has value 0 so that we are sure we're indexing into the initializer. 3034 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); 3035 if (!FirstIdx || !FirstIdx->isZero()) 3036 return false; 3037 3038 return true; 3039 } 3040 3041 /// This function computes the length of a null-terminated C string pointed to 3042 /// by V. If successful, it returns true and returns the string in Str. 3043 /// If unsuccessful, it returns false. 3044 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 3045 uint64_t Offset, bool TrimAtNul) { 3046 assert(V); 3047 3048 // Look through bitcast instructions and geps. 3049 V = V->stripPointerCasts(); 3050 3051 // If the value is a GEP instruction or constant expression, treat it as an 3052 // offset. 3053 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 3054 // The GEP operator should be based on a pointer to string constant, and is 3055 // indexing into the string constant. 3056 if (!isGEPBasedOnPointerToString(GEP)) 3057 return false; 3058 3059 // If the second index isn't a ConstantInt, then this is a variable index 3060 // into the array. If this occurs, we can't say anything meaningful about 3061 // the string. 3062 uint64_t StartIdx = 0; 3063 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) 3064 StartIdx = CI->getZExtValue(); 3065 else 3066 return false; 3067 return getConstantStringInfo(GEP->getOperand(0), Str, StartIdx + Offset, 3068 TrimAtNul); 3069 } 3070 3071 // The GEP instruction, constant or instruction, must reference a global 3072 // variable that is a constant and is initialized. The referenced constant 3073 // initializer is the array that we'll use for optimization. 3074 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); 3075 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) 3076 return false; 3077 3078 // Handle the all-zeros case. 3079 if (GV->getInitializer()->isNullValue()) { 3080 // This is a degenerate case. The initializer is constant zero so the 3081 // length of the string must be zero. 3082 Str = ""; 3083 return true; 3084 } 3085 3086 // This must be a ConstantDataArray. 3087 const auto *Array = dyn_cast<ConstantDataArray>(GV->getInitializer()); 3088 if (!Array || !Array->isString()) 3089 return false; 3090 3091 // Get the number of elements in the array. 3092 uint64_t NumElts = Array->getType()->getArrayNumElements(); 3093 3094 // Start out with the entire array in the StringRef. 3095 Str = Array->getAsString(); 3096 3097 if (Offset > NumElts) 3098 return false; 3099 3100 // Skip over 'offset' bytes. 3101 Str = Str.substr(Offset); 3102 3103 if (TrimAtNul) { 3104 // Trim off the \0 and anything after it. If the array is not nul 3105 // terminated, we just return the whole end of string. The client may know 3106 // some other way that the string is length-bound. 3107 Str = Str.substr(0, Str.find('\0')); 3108 } 3109 return true; 3110 } 3111 3112 // These next two are very similar to the above, but also look through PHI 3113 // nodes. 3114 // TODO: See if we can integrate these two together. 3115 3116 /// If we can compute the length of the string pointed to by 3117 /// the specified pointer, return 'len+1'. If we can't, return 0. 
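/// As a rough illustration (the global name below is made up): for a pointer
/// to
///   @msg = private constant [6 x i8] c"hello\00"
/// GetStringLength returns 6 (five characters plus the terminating nul), and
/// 0 when the length cannot be determined.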
3118 static uint64_t GetStringLengthH(const Value *V, 3119 SmallPtrSetImpl<const PHINode*> &PHIs) { 3120 // Look through noop bitcast instructions. 3121 V = V->stripPointerCasts(); 3122 3123 // If this is a PHI node, there are two cases: either we have already seen it 3124 // or we haven't. 3125 if (const PHINode *PN = dyn_cast<PHINode>(V)) { 3126 if (!PHIs.insert(PN).second) 3127 return ~0ULL; // already in the set. 3128 3129 // If it was new, see if all the input strings are the same length. 3130 uint64_t LenSoFar = ~0ULL; 3131 for (Value *IncValue : PN->incoming_values()) { 3132 uint64_t Len = GetStringLengthH(IncValue, PHIs); 3133 if (Len == 0) return 0; // Unknown length -> unknown. 3134 3135 if (Len == ~0ULL) continue; 3136 3137 if (Len != LenSoFar && LenSoFar != ~0ULL) 3138 return 0; // Disagree -> unknown. 3139 LenSoFar = Len; 3140 } 3141 3142 // Success, all agree. 3143 return LenSoFar; 3144 } 3145 3146 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) 3147 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 3148 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs); 3149 if (Len1 == 0) return 0; 3150 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs); 3151 if (Len2 == 0) return 0; 3152 if (Len1 == ~0ULL) return Len2; 3153 if (Len2 == ~0ULL) return Len1; 3154 if (Len1 != Len2) return 0; 3155 return Len1; 3156 } 3157 3158 // Otherwise, see if we can read the string. 3159 StringRef StrData; 3160 if (!getConstantStringInfo(V, StrData)) 3161 return 0; 3162 3163 return StrData.size()+1; 3164 } 3165 3166 /// If we can compute the length of the string pointed to by 3167 /// the specified pointer, return 'len+1'. If we can't, return 0. 3168 uint64_t llvm::GetStringLength(const Value *V) { 3169 if (!V->getType()->isPointerTy()) return 0; 3170 3171 SmallPtrSet<const PHINode*, 32> PHIs; 3172 uint64_t Len = GetStringLengthH(V, PHIs); 3173 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return 3174 // an empty string as a length. 3175 return Len == ~0ULL ? 1 : Len; 3176 } 3177 3178 /// \brief \p PN defines a loop-variant pointer to an object. Check if the 3179 /// previous iteration of the loop was referring to the same object as \p PN. 3180 static bool isSameUnderlyingObjectInLoop(const PHINode *PN, 3181 const LoopInfo *LI) { 3182 // Find the loop-defined value. 3183 Loop *L = LI->getLoopFor(PN->getParent()); 3184 if (PN->getNumIncomingValues() != 2) 3185 return true; 3186 3187 // Find the value from previous iteration. 3188 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); 3189 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3190 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); 3191 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3192 return true; 3193 3194 // If a new pointer is loaded in the loop, the pointer references a different 3195 // object in every iteration. E.g.: 3196 // for (i) 3197 // int *p = a[i]; 3198 // ... 
3199 if (auto *Load = dyn_cast<LoadInst>(PrevValue)) 3200 if (!L->isLoopInvariant(Load->getPointerOperand())) 3201 return false; 3202 return true; 3203 } 3204 3205 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL, 3206 unsigned MaxLookup) { 3207 if (!V->getType()->isPointerTy()) 3208 return V; 3209 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { 3210 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 3211 V = GEP->getPointerOperand(); 3212 } else if (Operator::getOpcode(V) == Instruction::BitCast || 3213 Operator::getOpcode(V) == Instruction::AddrSpaceCast) { 3214 V = cast<Operator>(V)->getOperand(0); 3215 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 3216 if (GA->isInterposable()) 3217 return V; 3218 V = GA->getAliasee(); 3219 } else { 3220 if (auto CS = CallSite(V)) 3221 if (Value *RV = CS.getReturnedArgOperand()) { 3222 V = RV; 3223 continue; 3224 } 3225 3226 // See if InstructionSimplify knows any relevant tricks. 3227 if (Instruction *I = dyn_cast<Instruction>(V)) 3228 // TODO: Acquire a DominatorTree and AssumptionCache and use them. 3229 if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) { 3230 V = Simplified; 3231 continue; 3232 } 3233 3234 return V; 3235 } 3236 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 3237 } 3238 return V; 3239 } 3240 3241 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects, 3242 const DataLayout &DL, LoopInfo *LI, 3243 unsigned MaxLookup) { 3244 SmallPtrSet<Value *, 4> Visited; 3245 SmallVector<Value *, 4> Worklist; 3246 Worklist.push_back(V); 3247 do { 3248 Value *P = Worklist.pop_back_val(); 3249 P = GetUnderlyingObject(P, DL, MaxLookup); 3250 3251 if (!Visited.insert(P).second) 3252 continue; 3253 3254 if (SelectInst *SI = dyn_cast<SelectInst>(P)) { 3255 Worklist.push_back(SI->getTrueValue()); 3256 Worklist.push_back(SI->getFalseValue()); 3257 continue; 3258 } 3259 3260 if (PHINode *PN = dyn_cast<PHINode>(P)) { 3261 // If this PHI changes the underlying object in every iteration of the 3262 // loop, don't look through it. Consider: 3263 // int **A; 3264 // for (i) { 3265 // Prev = Curr; // Prev = PHI (Prev_0, Curr) 3266 // Curr = A[i]; 3267 // *Prev, *Curr; 3268 // 3269 // Prev is tracking Curr one iteration behind so they refer to different 3270 // underlying objects. 3271 if (!LI || !LI->isLoopHeader(PN->getParent()) || 3272 isSameUnderlyingObjectInLoop(PN, LI)) 3273 for (Value *IncValue : PN->incoming_values()) 3274 Worklist.push_back(IncValue); 3275 continue; 3276 } 3277 3278 Objects.push_back(P); 3279 } while (!Worklist.empty()); 3280 } 3281 3282 /// Return true if the only users of this pointer are lifetime markers. 
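/// For example (illustrative IR, made-up names):
///   %buf = alloca i8, i32 16
///   call void @llvm.lifetime.start(i64 16, i8* %buf)
///   call void @llvm.lifetime.end(i64 16, i8* %buf)
/// Here every user of %buf is a lifetime intrinsic, so this returns true; any
/// other user (a load, a store, or an ordinary call argument) makes it return
/// false.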
3283 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 3284 for (const User *U : V->users()) { 3285 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3286 if (!II) return false; 3287 3288 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 3289 II->getIntrinsicID() != Intrinsic::lifetime_end) 3290 return false; 3291 } 3292 return true; 3293 } 3294 3295 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 3296 const Instruction *CtxI, 3297 const DominatorTree *DT) { 3298 const Operator *Inst = dyn_cast<Operator>(V); 3299 if (!Inst) 3300 return false; 3301 3302 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 3303 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 3304 if (C->canTrap()) 3305 return false; 3306 3307 switch (Inst->getOpcode()) { 3308 default: 3309 return true; 3310 case Instruction::UDiv: 3311 case Instruction::URem: { 3312 // x / y is undefined if y == 0. 3313 const APInt *V; 3314 if (match(Inst->getOperand(1), m_APInt(V))) 3315 return *V != 0; 3316 return false; 3317 } 3318 case Instruction::SDiv: 3319 case Instruction::SRem: { 3320 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 3321 const APInt *Numerator, *Denominator; 3322 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 3323 return false; 3324 // We cannot hoist this division if the denominator is 0. 3325 if (*Denominator == 0) 3326 return false; 3327 // It's safe to hoist if the denominator is not 0 or -1. 3328 if (*Denominator != -1) 3329 return true; 3330 // At this point we know that the denominator is -1. It is safe to hoist as 3331 // long we know that the numerator is not INT_MIN. 3332 if (match(Inst->getOperand(0), m_APInt(Numerator))) 3333 return !Numerator->isMinSignedValue(); 3334 // The numerator *might* be MinSignedValue. 3335 return false; 3336 } 3337 case Instruction::Load: { 3338 const LoadInst *LI = cast<LoadInst>(Inst); 3339 if (!LI->isUnordered() || 3340 // Speculative load may create a race that did not exist in the source. 3341 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) || 3342 // Speculative load may load data from dirty regions. 3343 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress)) 3344 return false; 3345 const DataLayout &DL = LI->getModule()->getDataLayout(); 3346 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(), 3347 LI->getAlignment(), DL, CtxI, DT); 3348 } 3349 case Instruction::Call: { 3350 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 3351 switch (II->getIntrinsicID()) { 3352 // These synthetic intrinsics have no side-effects and just mark 3353 // information about their operands. 3354 // FIXME: There are other no-op synthetic instructions that potentially 3355 // should be considered at least *safe* to speculate... 3356 case Intrinsic::dbg_declare: 3357 case Intrinsic::dbg_value: 3358 return true; 3359 3360 case Intrinsic::bitreverse: 3361 case Intrinsic::bswap: 3362 case Intrinsic::ctlz: 3363 case Intrinsic::ctpop: 3364 case Intrinsic::cttz: 3365 case Intrinsic::objectsize: 3366 case Intrinsic::sadd_with_overflow: 3367 case Intrinsic::smul_with_overflow: 3368 case Intrinsic::ssub_with_overflow: 3369 case Intrinsic::uadd_with_overflow: 3370 case Intrinsic::umul_with_overflow: 3371 case Intrinsic::usub_with_overflow: 3372 return true; 3373 // These intrinsics are defined to have the same behavior as libm 3374 // functions except for setting errno. 
3375 case Intrinsic::sqrt: 3376 case Intrinsic::fma: 3377 case Intrinsic::fmuladd: 3378 return true; 3379 // These intrinsics are defined to have the same behavior as libm 3380 // functions, and the corresponding libm functions never set errno. 3381 case Intrinsic::trunc: 3382 case Intrinsic::copysign: 3383 case Intrinsic::fabs: 3384 case Intrinsic::minnum: 3385 case Intrinsic::maxnum: 3386 return true; 3387 // These intrinsics are defined to have the same behavior as libm 3388 // functions, which never overflow when operating on the IEEE754 types 3389 // that we support, and never set errno otherwise. 3390 case Intrinsic::ceil: 3391 case Intrinsic::floor: 3392 case Intrinsic::nearbyint: 3393 case Intrinsic::rint: 3394 case Intrinsic::round: 3395 return true; 3396 // These intrinsics do not correspond to any libm function, and 3397 // do not set errno. 3398 case Intrinsic::powi: 3399 return true; 3400 // TODO: are convert_{from,to}_fp16 safe? 3401 // TODO: can we list target-specific intrinsics here? 3402 default: break; 3403 } 3404 } 3405 return false; // The called function could have undefined behavior or 3406 // side-effects, even if marked readnone nounwind. 3407 } 3408 case Instruction::VAArg: 3409 case Instruction::Alloca: 3410 case Instruction::Invoke: 3411 case Instruction::PHI: 3412 case Instruction::Store: 3413 case Instruction::Ret: 3414 case Instruction::Br: 3415 case Instruction::IndirectBr: 3416 case Instruction::Switch: 3417 case Instruction::Unreachable: 3418 case Instruction::Fence: 3419 case Instruction::AtomicRMW: 3420 case Instruction::AtomicCmpXchg: 3421 case Instruction::LandingPad: 3422 case Instruction::Resume: 3423 case Instruction::CatchSwitch: 3424 case Instruction::CatchPad: 3425 case Instruction::CatchRet: 3426 case Instruction::CleanupPad: 3427 case Instruction::CleanupRet: 3428 return false; // Misc instructions which have effects 3429 } 3430 } 3431 3432 bool llvm::mayBeMemoryDependent(const Instruction &I) { 3433 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 3434 } 3435 3436 /// Return true if we know that the specified value is never null. 3437 bool llvm::isKnownNonNull(const Value *V) { 3438 assert(V->getType()->isPointerTy() && "V must be pointer type"); 3439 3440 // Alloca never returns null, malloc might. 3441 if (isa<AllocaInst>(V)) return true; 3442 3443 // A byval, inalloca, or nonnull argument is never null. 3444 if (const Argument *A = dyn_cast<Argument>(V)) 3445 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr(); 3446 3447 // A global variable in address space 0 is non null unless extern weak 3448 // or an absolute symbol reference. Other address spaces may have null as a 3449 // valid address for a global, so we can't assume anything. 3450 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) 3451 return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && 3452 GV->getType()->getAddressSpace() == 0; 3453 3454 // A Load tagged with nonnull metadata is never null. 
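  // (Illustrative IR, made-up names: a load such as
  //  %p = load i8*, i8** %q, !nonnull !0 satisfies this check.)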
3455 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 3456 return LI->getMetadata(LLVMContext::MD_nonnull); 3457 3458 if (auto CS = ImmutableCallSite(V)) 3459 if (CS.isReturnNonNull()) 3460 return true; 3461 3462 return false; 3463 } 3464 3465 static bool isKnownNonNullFromDominatingCondition(const Value *V, 3466 const Instruction *CtxI, 3467 const DominatorTree *DT) { 3468 assert(V->getType()->isPointerTy() && "V must be pointer type"); 3469 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull"); 3470 assert(CtxI && "Context instruction required for analysis"); 3471 assert(DT && "Dominator tree required for analysis"); 3472 3473 unsigned NumUsesExplored = 0; 3474 for (auto *U : V->users()) { 3475 // Avoid massive lists 3476 if (NumUsesExplored >= DomConditionsMaxUses) 3477 break; 3478 NumUsesExplored++; 3479 3480 // If the value is used as an argument to a call or invoke, then argument 3481 // attributes may provide an answer about null-ness. 3482 if (auto CS = ImmutableCallSite(U)) 3483 if (auto *CalledFunc = CS.getCalledFunction()) 3484 for (const Argument &Arg : CalledFunc->args()) 3485 if (CS.getArgOperand(Arg.getArgNo()) == V && 3486 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI)) 3487 return true; 3488 3489 // Consider only compare instructions uniquely controlling a branch 3490 CmpInst::Predicate Pred; 3491 if (!match(const_cast<User *>(U), 3492 m_c_ICmp(Pred, m_Specific(V), m_Zero())) || 3493 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)) 3494 continue; 3495 3496 for (auto *CmpU : U->users()) { 3497 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) { 3498 assert(BI->isConditional() && "uses a comparison!"); 3499 3500 BasicBlock *NonNullSuccessor = 3501 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0); 3502 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); 3503 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) 3504 return true; 3505 } else if (Pred == ICmpInst::ICMP_NE && 3506 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) && 3507 DT->dominates(cast<Instruction>(CmpU), CtxI)) { 3508 return true; 3509 } 3510 } 3511 } 3512 3513 return false; 3514 } 3515 3516 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI, 3517 const DominatorTree *DT) { 3518 if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V)) 3519 return false; 3520 3521 if (isKnownNonNull(V)) 3522 return true; 3523 3524 if (!CtxI || !DT) 3525 return false; 3526 3527 return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT); 3528 } 3529 3530 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS, 3531 const Value *RHS, 3532 const DataLayout &DL, 3533 AssumptionCache *AC, 3534 const Instruction *CxtI, 3535 const DominatorTree *DT) { 3536 // Multiplying n * m significant bits yields a result of n + m significant 3537 // bits. If the total number of significant bits does not exceed the 3538 // result bit width (minus 1), there is no overflow. 3539 // This means if we have enough leading zero bits in the operands 3540 // we can guarantee that the result does not overflow. 
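  // For example (illustrative): with i8 operands, 4 known leading zero bits
  // on one side and 5 on the other give ZeroBits = 9 >= 8; the operands are
  // then at most 15 and 7, and 15 * 7 = 105 fits in 8 bits, so the multiply
  // cannot wrap.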
3541 // Ref: "Hacker's Delight" by Henry Warren 3542 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3543 APInt LHSKnownZero(BitWidth, 0); 3544 APInt LHSKnownOne(BitWidth, 0); 3545 APInt RHSKnownZero(BitWidth, 0); 3546 APInt RHSKnownOne(BitWidth, 0); 3547 computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI, 3548 DT); 3549 computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI, 3550 DT); 3551 // Note that underestimating the number of zero bits gives a more 3552 // conservative answer. 3553 unsigned ZeroBits = LHSKnownZero.countLeadingOnes() + 3554 RHSKnownZero.countLeadingOnes(); 3555 // First handle the easy case: if we have enough zero bits there's 3556 // definitely no overflow. 3557 if (ZeroBits >= BitWidth) 3558 return OverflowResult::NeverOverflows; 3559 3560 // Get the largest possible values for each operand. 3561 APInt LHSMax = ~LHSKnownZero; 3562 APInt RHSMax = ~RHSKnownZero; 3563 3564 // We know the multiply operation doesn't overflow if the maximum values for 3565 // each operand will not overflow after we multiply them together. 3566 bool MaxOverflow; 3567 LHSMax.umul_ov(RHSMax, MaxOverflow); 3568 if (!MaxOverflow) 3569 return OverflowResult::NeverOverflows; 3570 3571 // We know it always overflows if multiplying the smallest possible values for 3572 // the operands also results in overflow. 3573 bool MinOverflow; 3574 LHSKnownOne.umul_ov(RHSKnownOne, MinOverflow); 3575 if (MinOverflow) 3576 return OverflowResult::AlwaysOverflows; 3577 3578 return OverflowResult::MayOverflow; 3579 } 3580 3581 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS, 3582 const Value *RHS, 3583 const DataLayout &DL, 3584 AssumptionCache *AC, 3585 const Instruction *CxtI, 3586 const DominatorTree *DT) { 3587 bool LHSKnownNonNegative, LHSKnownNegative; 3588 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0, 3589 AC, CxtI, DT); 3590 if (LHSKnownNonNegative || LHSKnownNegative) { 3591 bool RHSKnownNonNegative, RHSKnownNegative; 3592 ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0, 3593 AC, CxtI, DT); 3594 3595 if (LHSKnownNegative && RHSKnownNegative) { 3596 // The sign bit is set in both cases: this MUST overflow. 3597 // Create a simple add instruction, and insert it into the struct. 3598 return OverflowResult::AlwaysOverflows; 3599 } 3600 3601 if (LHSKnownNonNegative && RHSKnownNonNegative) { 3602 // The sign bit is clear in both cases: this CANNOT overflow. 3603 // Create a simple add instruction, and insert it into the struct. 3604 return OverflowResult::NeverOverflows; 3605 } 3606 } 3607 3608 return OverflowResult::MayOverflow; 3609 } 3610 3611 static OverflowResult computeOverflowForSignedAdd(const Value *LHS, 3612 const Value *RHS, 3613 const AddOperator *Add, 3614 const DataLayout &DL, 3615 AssumptionCache *AC, 3616 const Instruction *CxtI, 3617 const DominatorTree *DT) { 3618 if (Add && Add->hasNoSignedWrap()) { 3619 return OverflowResult::NeverOverflows; 3620 } 3621 3622 bool LHSKnownNonNegative, LHSKnownNegative; 3623 bool RHSKnownNonNegative, RHSKnownNegative; 3624 ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0, 3625 AC, CxtI, DT); 3626 ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0, 3627 AC, CxtI, DT); 3628 3629 if ((LHSKnownNonNegative && RHSKnownNegative) || 3630 (LHSKnownNegative && RHSKnownNonNegative)) { 3631 // The sign bits are opposite: this CANNOT overflow. 
3632 return OverflowResult::NeverOverflows; 3633 } 3634 3635 // The remaining code needs Add to be available. Early returns if not so. 3636 if (!Add) 3637 return OverflowResult::MayOverflow; 3638 3639 // If the sign of Add is the same as at least one of the operands, this add 3640 // CANNOT overflow. This is particularly useful when the sum is 3641 // @llvm.assume'ed non-negative rather than proved so from analyzing its 3642 // operands. 3643 bool LHSOrRHSKnownNonNegative = 3644 (LHSKnownNonNegative || RHSKnownNonNegative); 3645 bool LHSOrRHSKnownNegative = (LHSKnownNegative || RHSKnownNegative); 3646 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { 3647 bool AddKnownNonNegative, AddKnownNegative; 3648 ComputeSignBit(Add, AddKnownNonNegative, AddKnownNegative, DL, 3649 /*Depth=*/0, AC, CxtI, DT); 3650 if ((AddKnownNonNegative && LHSOrRHSKnownNonNegative) || 3651 (AddKnownNegative && LHSOrRHSKnownNegative)) { 3652 return OverflowResult::NeverOverflows; 3653 } 3654 } 3655 3656 return OverflowResult::MayOverflow; 3657 } 3658 3659 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II, 3660 const DominatorTree &DT) { 3661 #ifndef NDEBUG 3662 auto IID = II->getIntrinsicID(); 3663 assert((IID == Intrinsic::sadd_with_overflow || 3664 IID == Intrinsic::uadd_with_overflow || 3665 IID == Intrinsic::ssub_with_overflow || 3666 IID == Intrinsic::usub_with_overflow || 3667 IID == Intrinsic::smul_with_overflow || 3668 IID == Intrinsic::umul_with_overflow) && 3669 "Not an overflow intrinsic!"); 3670 #endif 3671 3672 SmallVector<const BranchInst *, 2> GuardingBranches; 3673 SmallVector<const ExtractValueInst *, 2> Results; 3674 3675 for (const User *U : II->users()) { 3676 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) { 3677 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type"); 3678 3679 if (EVI->getIndices()[0] == 0) 3680 Results.push_back(EVI); 3681 else { 3682 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type"); 3683 3684 for (const auto *U : EVI->users()) 3685 if (const auto *B = dyn_cast<BranchInst>(U)) { 3686 assert(B->isConditional() && "How else is it using an i1?"); 3687 GuardingBranches.push_back(B); 3688 } 3689 } 3690 } else { 3691 // We are using the aggregate directly in a way we don't want to analyze 3692 // here (storing it to a global, say). 3693 return false; 3694 } 3695 } 3696 3697 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) { 3698 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1)); 3699 if (!NoWrapEdge.isSingleEdge()) 3700 return false; 3701 3702 // Check if all users of the add are provably no-wrap. 3703 for (const auto *Result : Results) { 3704 // If the extractvalue itself is not executed on overflow, the we don't 3705 // need to check each use separately, since domination is transitive. 
3706 if (DT.dominates(NoWrapEdge, Result->getParent())) 3707 continue; 3708 3709 for (auto &RU : Result->uses()) 3710 if (!DT.dominates(NoWrapEdge, RU)) 3711 return false; 3712 } 3713 3714 return true; 3715 }; 3716 3717 return any_of(GuardingBranches, AllUsesGuardedByBranch); 3718 } 3719 3720 3721 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add, 3722 const DataLayout &DL, 3723 AssumptionCache *AC, 3724 const Instruction *CxtI, 3725 const DominatorTree *DT) { 3726 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1), 3727 Add, DL, AC, CxtI, DT); 3728 } 3729 3730 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS, 3731 const Value *RHS, 3732 const DataLayout &DL, 3733 AssumptionCache *AC, 3734 const Instruction *CxtI, 3735 const DominatorTree *DT) { 3736 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT); 3737 } 3738 3739 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) { 3740 // A memory operation returns normally if it isn't volatile. A volatile 3741 // operation is allowed to trap. 3742 // 3743 // An atomic operation isn't guaranteed to return in a reasonable amount of 3744 // time because it's possible for another thread to interfere with it for an 3745 // arbitrary length of time, but programs aren't allowed to rely on that. 3746 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) 3747 return !LI->isVolatile(); 3748 if (const StoreInst *SI = dyn_cast<StoreInst>(I)) 3749 return !SI->isVolatile(); 3750 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I)) 3751 return !CXI->isVolatile(); 3752 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) 3753 return !RMWI->isVolatile(); 3754 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I)) 3755 return !MII->isVolatile(); 3756 3757 // If there is no successor, then execution can't transfer to it. 3758 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I)) 3759 return !CRI->unwindsToCaller(); 3760 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) 3761 return !CatchSwitch->unwindsToCaller(); 3762 if (isa<ResumeInst>(I)) 3763 return false; 3764 if (isa<ReturnInst>(I)) 3765 return false; 3766 if (isa<UnreachableInst>(I)) 3767 return false; 3768 3769 // Calls can throw, or contain an infinite loop, or kill the process. 3770 if (auto CS = ImmutableCallSite(I)) { 3771 // Call sites that throw have implicit non-local control flow. 3772 if (!CS.doesNotThrow()) 3773 return false; 3774 3775 // Non-throwing call sites can loop infinitely, call exit/pthread_exit 3776 // etc. and thus not return. However, LLVM already assumes that 3777 // 3778 // - Thread exiting actions are modeled as writes to memory invisible to 3779 // the program. 3780 // 3781 // - Loops that don't have side effects (side effects are volatile/atomic 3782 // stores and IO) always terminate (see http://llvm.org/PR965). 3783 // Furthermore IO itself is also modeled as writes to memory invisible to 3784 // the program. 3785 // 3786 // We rely on those assumptions here, and use the memory effects of the call 3787 // target as a proxy for checking that it always returns. 3788 3789 // FIXME: This isn't aggressive enough; a call which only writes to a global 3790 // is guaranteed to return. 3791 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() || 3792 match(I, m_Intrinsic<Intrinsic::assume>()); 3793 } 3794 3795 // Other instructions return normally. 
3796 return true; 3797 } 3798 3799 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I, 3800 const Loop *L) { 3801 // The loop header is guaranteed to be executed for every iteration. 3802 // 3803 // FIXME: Relax this constraint to cover all basic blocks that are 3804 // guaranteed to be executed at every iteration. 3805 if (I->getParent() != L->getHeader()) return false; 3806 3807 for (const Instruction &LI : *L->getHeader()) { 3808 if (&LI == I) return true; 3809 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false; 3810 } 3811 llvm_unreachable("Instruction not contained in its own parent basic block."); 3812 } 3813 3814 bool llvm::propagatesFullPoison(const Instruction *I) { 3815 switch (I->getOpcode()) { 3816 case Instruction::Add: 3817 case Instruction::Sub: 3818 case Instruction::Xor: 3819 case Instruction::Trunc: 3820 case Instruction::BitCast: 3821 case Instruction::AddrSpaceCast: 3822 case Instruction::Mul: 3823 case Instruction::Shl: 3824 case Instruction::GetElementPtr: 3825 // These operations all propagate poison unconditionally. Note that poison 3826 // is not any particular value, so xor or subtraction of poison with 3827 // itself still yields poison, not zero. 3828 return true; 3829 3830 case Instruction::AShr: 3831 case Instruction::SExt: 3832 // For these operations, one bit of the input is replicated across 3833 // multiple output bits. A replicated poison bit is still poison. 3834 return true; 3835 3836 case Instruction::ICmp: 3837 // Comparing poison with any value yields poison. This is why, for 3838 // instance, x s< (x +nsw 1) can be folded to true. 3839 return true; 3840 3841 default: 3842 return false; 3843 } 3844 } 3845 3846 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) { 3847 switch (I->getOpcode()) { 3848 case Instruction::Store: 3849 return cast<StoreInst>(I)->getPointerOperand(); 3850 3851 case Instruction::Load: 3852 return cast<LoadInst>(I)->getPointerOperand(); 3853 3854 case Instruction::AtomicCmpXchg: 3855 return cast<AtomicCmpXchgInst>(I)->getPointerOperand(); 3856 3857 case Instruction::AtomicRMW: 3858 return cast<AtomicRMWInst>(I)->getPointerOperand(); 3859 3860 case Instruction::UDiv: 3861 case Instruction::SDiv: 3862 case Instruction::URem: 3863 case Instruction::SRem: 3864 return I->getOperand(1); 3865 3866 default: 3867 return nullptr; 3868 } 3869 } 3870 3871 bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) { 3872 // We currently only look for uses of poison values within the same basic 3873 // block, as that makes it easier to guarantee that the uses will be 3874 // executed given that PoisonI is executed. 3875 // 3876 // FIXME: Expand this to consider uses beyond the same basic block. To do 3877 // this, look out for the distinction between post-dominance and strong 3878 // post-dominance. 3879 const BasicBlock *BB = PoisonI->getParent(); 3880 3881 // Set of instructions that we have proved will yield poison if PoisonI 3882 // does. 
3883 SmallSet<const Value *, 16> YieldsPoison; 3884 SmallSet<const BasicBlock *, 4> Visited; 3885 YieldsPoison.insert(PoisonI); 3886 Visited.insert(PoisonI->getParent()); 3887 3888 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end(); 3889 3890 unsigned Iter = 0; 3891 while (Iter++ < MaxDepth) { 3892 for (auto &I : make_range(Begin, End)) { 3893 if (&I != PoisonI) { 3894 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I); 3895 if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) 3896 return true; 3897 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 3898 return false; 3899 } 3900 3901 // Mark poison that propagates from I through uses of I. 3902 if (YieldsPoison.count(&I)) { 3903 for (const User *User : I.users()) { 3904 const Instruction *UserI = cast<Instruction>(User); 3905 if (propagatesFullPoison(UserI)) 3906 YieldsPoison.insert(User); 3907 } 3908 } 3909 } 3910 3911 if (auto *NextBB = BB->getSingleSuccessor()) { 3912 if (Visited.insert(NextBB).second) { 3913 BB = NextBB; 3914 Begin = BB->getFirstNonPHI()->getIterator(); 3915 End = BB->end(); 3916 continue; 3917 } 3918 } 3919 3920 break; 3921 }; 3922 return false; 3923 } 3924 3925 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) { 3926 if (FMF.noNaNs()) 3927 return true; 3928 3929 if (auto *C = dyn_cast<ConstantFP>(V)) 3930 return !C->isNaN(); 3931 return false; 3932 } 3933 3934 static bool isKnownNonZero(const Value *V) { 3935 if (auto *C = dyn_cast<ConstantFP>(V)) 3936 return !C->isZero(); 3937 return false; 3938 } 3939 3940 /// Match non-obvious integer minimum and maximum sequences. 3941 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, 3942 Value *CmpLHS, Value *CmpRHS, 3943 Value *TrueVal, Value *FalseVal, 3944 Value *&LHS, Value *&RHS) { 3945 // Assume success. If there's no match, callers should not use these anyway. 3946 LHS = TrueVal; 3947 RHS = FalseVal; 3948 3949 // Recognize variations of: 3950 // CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v))) 3951 const APInt *C1; 3952 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 3953 const APInt *C2; 3954 3955 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 3956 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 3957 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 3958 return {SPF_SMAX, SPNB_NA, false}; 3959 3960 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 3961 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 3962 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 3963 return {SPF_SMIN, SPNB_NA, false}; 3964 3965 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 3966 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 3967 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 3968 return {SPF_UMAX, SPNB_NA, false}; 3969 3970 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 3971 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 3972 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 3973 return {SPF_UMIN, SPNB_NA, false}; 3974 } 3975 3976 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) 3977 return {SPF_UNKNOWN, SPNB_NA, false}; 3978 3979 // Z = X -nsw Y 3980 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0) 3981 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0) 3982 if (match(TrueVal, m_Zero()) && 3983 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 3984 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 3985 3986 // Z = X -nsw Y 3987 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? 
Z : 0 ==> SMAX(Z, 0) 3988 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0) 3989 if (match(FalseVal, m_Zero()) && 3990 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 3991 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 3992 3993 if (!match(CmpRHS, m_APInt(C1))) 3994 return {SPF_UNKNOWN, SPNB_NA, false}; 3995 3996 // An unsigned min/max can be written with a signed compare. 3997 const APInt *C2; 3998 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || 3999 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { 4000 // Is the sign bit set? 4001 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX 4002 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN 4003 if (Pred == CmpInst::ICMP_SLT && *C1 == 0 && C2->isMaxSignedValue()) 4004 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4005 4006 // Is the sign bit clear? 4007 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX 4008 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN 4009 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() && 4010 C2->isMinSignedValue()) 4011 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4012 } 4013 4014 // Look through 'not' ops to find disguised signed min/max. 4015 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C) 4016 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C) 4017 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) && 4018 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2) 4019 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 4020 4021 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X) 4022 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X) 4023 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) && 4024 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2) 4025 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 4026 4027 return {SPF_UNKNOWN, SPNB_NA, false}; 4028 } 4029 4030 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, 4031 FastMathFlags FMF, 4032 Value *CmpLHS, Value *CmpRHS, 4033 Value *TrueVal, Value *FalseVal, 4034 Value *&LHS, Value *&RHS) { 4035 LHS = CmpLHS; 4036 RHS = CmpRHS; 4037 4038 // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may 4039 // return inconsistent results between implementations. 4040 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0 4041 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) 4042 // Therefore we behave conservatively and only proceed if at least one of the 4043 // operands is known to not be zero, or if we don't care about signed zeroes. 4044 switch (Pred) { 4045 default: break; 4046 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE: 4047 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE: 4048 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && 4049 !isKnownNonZero(CmpRHS)) 4050 return {SPF_UNKNOWN, SPNB_NA, false}; 4051 } 4052 4053 SelectPatternNaNBehavior NaNBehavior = SPNB_NA; 4054 bool Ordered = false; 4055 4056 // When given one NaN and one non-NaN input: 4057 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. 4058 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the 4059 // ordered comparison fails), which could be NaN or non-NaN. 4060 // so here we discover exactly what NaN behavior is required/accepted. 
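  // For example (illustrative): with a = 1.0 and b = NaN, fminf(a, b) returns
  // 1.0, while the C99 expression (a < b ? a : b) evaluates the ordered
  // comparison to false and returns b, i.e. NaN.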
4061 if (CmpInst::isFPPredicate(Pred)) { 4062 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); 4063 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); 4064 4065 if (LHSSafe && RHSSafe) { 4066 // Both operands are known non-NaN. 4067 NaNBehavior = SPNB_RETURNS_ANY; 4068 } else if (CmpInst::isOrdered(Pred)) { 4069 // An ordered comparison will return false when given a NaN, so it 4070 // returns the RHS. 4071 Ordered = true; 4072 if (LHSSafe) 4073 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 4074 NaNBehavior = SPNB_RETURNS_NAN; 4075 else if (RHSSafe) 4076 NaNBehavior = SPNB_RETURNS_OTHER; 4077 else 4078 // Completely unsafe. 4079 return {SPF_UNKNOWN, SPNB_NA, false}; 4080 } else { 4081 Ordered = false; 4082 // An unordered comparison will return true when given a NaN, so it 4083 // returns the LHS. 4084 if (LHSSafe) 4085 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 4086 NaNBehavior = SPNB_RETURNS_OTHER; 4087 else if (RHSSafe) 4088 NaNBehavior = SPNB_RETURNS_NAN; 4089 else 4090 // Completely unsafe. 4091 return {SPF_UNKNOWN, SPNB_NA, false}; 4092 } 4093 } 4094 4095 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 4096 std::swap(CmpLHS, CmpRHS); 4097 Pred = CmpInst::getSwappedPredicate(Pred); 4098 if (NaNBehavior == SPNB_RETURNS_NAN) 4099 NaNBehavior = SPNB_RETURNS_OTHER; 4100 else if (NaNBehavior == SPNB_RETURNS_OTHER) 4101 NaNBehavior = SPNB_RETURNS_NAN; 4102 Ordered = !Ordered; 4103 } 4104 4105 // ([if]cmp X, Y) ? X : Y 4106 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 4107 switch (Pred) { 4108 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 4109 case ICmpInst::ICMP_UGT: 4110 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; 4111 case ICmpInst::ICMP_SGT: 4112 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; 4113 case ICmpInst::ICMP_ULT: 4114 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; 4115 case ICmpInst::ICMP_SLT: 4116 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; 4117 case FCmpInst::FCMP_UGT: 4118 case FCmpInst::FCMP_UGE: 4119 case FCmpInst::FCMP_OGT: 4120 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; 4121 case FCmpInst::FCMP_ULT: 4122 case FCmpInst::FCMP_ULE: 4123 case FCmpInst::FCMP_OLT: 4124 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; 4125 } 4126 } 4127 4128 const APInt *C1; 4129 if (match(CmpRHS, m_APInt(C1))) { 4130 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) || 4131 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) { 4132 4133 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X 4134 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X 4135 if (Pred == ICmpInst::ICMP_SGT && (*C1 == 0 || C1->isAllOnesValue())) { 4136 return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false}; 4137 } 4138 4139 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X 4140 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X 4141 if (Pred == ICmpInst::ICMP_SLT && (*C1 == 0 || *C1 == 1)) { 4142 return {(CmpLHS == FalseVal) ? 
SPF_ABS : SPF_NABS, SPNB_NA, false}; 4143 } 4144 } 4145 } 4146 4147 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS); 4148 } 4149 4150 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, 4151 Instruction::CastOps *CastOp) { 4152 auto *Cast1 = dyn_cast<CastInst>(V1); 4153 if (!Cast1) 4154 return nullptr; 4155 4156 *CastOp = Cast1->getOpcode(); 4157 Type *SrcTy = Cast1->getSrcTy(); 4158 if (auto *Cast2 = dyn_cast<CastInst>(V2)) { 4159 // If V1 and V2 are both the same cast from the same type, look through V1. 4160 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy()) 4161 return Cast2->getOperand(0); 4162 return nullptr; 4163 } 4164 4165 auto *C = dyn_cast<Constant>(V2); 4166 if (!C) 4167 return nullptr; 4168 4169 Constant *CastedTo = nullptr; 4170 switch (*CastOp) { 4171 case Instruction::ZExt: 4172 if (CmpI->isUnsigned()) 4173 CastedTo = ConstantExpr::getTrunc(C, SrcTy); 4174 break; 4175 case Instruction::SExt: 4176 if (CmpI->isSigned()) 4177 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true); 4178 break; 4179 case Instruction::Trunc: 4180 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned()); 4181 break; 4182 case Instruction::FPTrunc: 4183 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true); 4184 break; 4185 case Instruction::FPExt: 4186 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true); 4187 break; 4188 case Instruction::FPToUI: 4189 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true); 4190 break; 4191 case Instruction::FPToSI: 4192 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true); 4193 break; 4194 case Instruction::UIToFP: 4195 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true); 4196 break; 4197 case Instruction::SIToFP: 4198 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true); 4199 break; 4200 default: 4201 break; 4202 } 4203 4204 if (!CastedTo) 4205 return nullptr; 4206 4207 // Make sure the cast doesn't lose any information. 4208 Constant *CastedBack = 4209 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true); 4210 if (CastedBack != C) 4211 return nullptr; 4212 4213 return CastedTo; 4214 } 4215 4216 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, 4217 Instruction::CastOps *CastOp) { 4218 SelectInst *SI = dyn_cast<SelectInst>(V); 4219 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false}; 4220 4221 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); 4222 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false}; 4223 4224 CmpInst::Predicate Pred = CmpI->getPredicate(); 4225 Value *CmpLHS = CmpI->getOperand(0); 4226 Value *CmpRHS = CmpI->getOperand(1); 4227 Value *TrueVal = SI->getTrueValue(); 4228 Value *FalseVal = SI->getFalseValue(); 4229 FastMathFlags FMF; 4230 if (isa<FPMathOperator>(CmpI)) 4231 FMF = CmpI->getFastMathFlags(); 4232 4233 // Bail out early. 4234 if (CmpI->isEquality()) 4235 return {SPF_UNKNOWN, SPNB_NA, false}; 4236 4237 // Deal with type mismatches. 4238 if (CastOp && CmpLHS->getType() != TrueVal->getType()) { 4239 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) 4240 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 4241 cast<CastInst>(TrueVal)->getOperand(0), C, 4242 LHS, RHS); 4243 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) 4244 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 4245 C, cast<CastInst>(FalseVal)->getOperand(0), 4246 LHS, RHS); 4247 } 4248 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, 4249 LHS, RHS); 4250 } 4251 4252 /// Return true if "icmp Pred LHS RHS" is always true. 
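/// For example (illustrative, made-up values): with Pred == ICMP_ULE,
/// LHS == %x and RHS == (add nuw %x, 42), the predicate always holds because
/// a nuw add can only move the value upward without wrapping.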
4253 static bool isTruePredicate(CmpInst::Predicate Pred, 4254 const Value *LHS, const Value *RHS, 4255 const DataLayout &DL, unsigned Depth, 4256 AssumptionCache *AC, const Instruction *CxtI, 4257 const DominatorTree *DT) { 4258 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!"); 4259 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS) 4260 return true; 4261 4262 switch (Pred) { 4263 default: 4264 return false; 4265 4266 case CmpInst::ICMP_SLE: { 4267 const APInt *C; 4268 4269 // LHS s<= LHS +_{nsw} C if C >= 0 4270 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C)))) 4271 return !C->isNegative(); 4272 return false; 4273 } 4274 4275 case CmpInst::ICMP_ULE: { 4276 const APInt *C; 4277 4278 // LHS u<= LHS +_{nuw} C for any C 4279 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C)))) 4280 return true; 4281 4282 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB) 4283 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B, 4284 const Value *&X, 4285 const APInt *&CA, const APInt *&CB) { 4286 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) && 4287 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB)))) 4288 return true; 4289 4290 // If X & C == 0 then (X | C) == X +_{nuw} C 4291 if (match(A, m_Or(m_Value(X), m_APInt(CA))) && 4292 match(B, m_Or(m_Specific(X), m_APInt(CB)))) { 4293 unsigned BitWidth = CA->getBitWidth(); 4294 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); 4295 computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT); 4296 4297 if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB) 4298 return true; 4299 } 4300 4301 return false; 4302 }; 4303 4304 const Value *X; 4305 const APInt *CLHS, *CRHS; 4306 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS)) 4307 return CLHS->ule(*CRHS); 4308 4309 return false; 4310 } 4311 } 4312 } 4313 4314 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred 4315 /// ALHS ARHS" is true. Otherwise, return None. 4316 static Optional<bool> 4317 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, 4318 const Value *ARHS, const Value *BLHS, 4319 const Value *BRHS, const DataLayout &DL, 4320 unsigned Depth, AssumptionCache *AC, 4321 const Instruction *CxtI, const DominatorTree *DT) { 4322 switch (Pred) { 4323 default: 4324 return None; 4325 4326 case CmpInst::ICMP_SLT: 4327 case CmpInst::ICMP_SLE: 4328 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI, 4329 DT) && 4330 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI, DT)) 4331 return true; 4332 return None; 4333 4334 case CmpInst::ICMP_ULT: 4335 case CmpInst::ICMP_ULE: 4336 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI, 4337 DT) && 4338 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI, DT)) 4339 return true; 4340 return None; 4341 } 4342 } 4343 4344 /// Return true if the operands of the two compares match. IsSwappedOps is true 4345 /// when the operands match, but are swapped. 4346 static bool isMatchingOps(const Value *ALHS, const Value *ARHS, 4347 const Value *BLHS, const Value *BRHS, 4348 bool &IsSwappedOps) { 4349 4350 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS); 4351 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS); 4352 return IsMatchingOps || IsSwappedOps; 4353 } 4354 4355 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is 4356 /// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS 4357 /// BRHS" is false. Otherwise, return None if we can't infer anything. 
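/// For example (illustrative): when the operands match, "icmp ult %a, %b"
/// implies "icmp ule %a, %b" is true, while "icmp eq %a, %b" implies
/// "icmp ugt %a, %b" is false.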
4358 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred, 4359 const Value *ALHS, 4360 const Value *ARHS, 4361 CmpInst::Predicate BPred, 4362 const Value *BLHS, 4363 const Value *BRHS, 4364 bool IsSwappedOps) { 4365 // Canonicalize the operands so they're matching. 4366 if (IsSwappedOps) { 4367 std::swap(BLHS, BRHS); 4368 BPred = ICmpInst::getSwappedPredicate(BPred); 4369 } 4370 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred)) 4371 return true; 4372 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred)) 4373 return false; 4374 4375 return None; 4376 } 4377 4378 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is 4379 /// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS 4380 /// C2" is false. Otherwise, return None if we can't infer anything. 4381 static Optional<bool> 4382 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS, 4383 const ConstantInt *C1, 4384 CmpInst::Predicate BPred, 4385 const Value *BLHS, const ConstantInt *C2) { 4386 assert(ALHS == BLHS && "LHS operands must match."); 4387 ConstantRange DomCR = 4388 ConstantRange::makeExactICmpRegion(APred, C1->getValue()); 4389 ConstantRange CR = 4390 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue()); 4391 ConstantRange Intersection = DomCR.intersectWith(CR); 4392 ConstantRange Difference = DomCR.difference(CR); 4393 if (Intersection.isEmptySet()) 4394 return false; 4395 if (Difference.isEmptySet()) 4396 return true; 4397 return None; 4398 } 4399 4400 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS, 4401 const DataLayout &DL, bool InvertAPred, 4402 unsigned Depth, AssumptionCache *AC, 4403 const Instruction *CxtI, 4404 const DominatorTree *DT) { 4405 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for example. 4406 if (LHS->getType() != RHS->getType()) 4407 return None; 4408 4409 Type *OpTy = LHS->getType(); 4410 assert(OpTy->getScalarType()->isIntegerTy(1)); 4411 4412 // LHS ==> RHS by definition 4413 if (!InvertAPred && LHS == RHS) 4414 return true; 4415 4416 if (OpTy->isVectorTy()) 4417 // TODO: extending the code below to handle vectors 4418 return None; 4419 assert(OpTy->isIntegerTy(1) && "implied by above"); 4420 4421 ICmpInst::Predicate APred, BPred; 4422 Value *ALHS, *ARHS; 4423 Value *BLHS, *BRHS; 4424 4425 if (!match(LHS, m_ICmp(APred, m_Value(ALHS), m_Value(ARHS))) || 4426 !match(RHS, m_ICmp(BPred, m_Value(BLHS), m_Value(BRHS)))) 4427 return None; 4428 4429 if (InvertAPred) 4430 APred = CmpInst::getInversePredicate(APred); 4431 4432 // Can we infer anything when the two compares have matching operands? 4433 bool IsSwappedOps; 4434 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) { 4435 if (Optional<bool> Implication = isImpliedCondMatchingOperands( 4436 APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps)) 4437 return Implication; 4438 // No amount of additional analysis will infer the second condition, so 4439 // early exit. 4440 return None; 4441 } 4442 4443 // Can we infer anything when the LHS operands match and the RHS operands are 4444 // constants (not necessarily matching)? 4445 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) { 4446 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands( 4447 APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS, 4448 cast<ConstantInt>(BRHS))) 4449 return Implication; 4450 // No amount of additional analysis will infer the second condition, so 4451 // early exit. 
4452 return None; 4453 } 4454 4455 if (APred == BPred) 4456 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC, 4457 CxtI, DT); 4458 4459 return None; 4460 } 4461
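
// Illustrative sketch (hypothetical helper, not part of the interface above):
// one way a client transform might use isImpliedCondition to notice that a
// branch condition is already decided by a dominating compare. The function
// name is made up, and the arguments simply spell out every parameter that the
// definition above accepts, so no assumption about default arguments is made.
LLVM_ATTRIBUTE_UNUSED
static bool dominatingCondImpliesSketch(const ICmpInst *DomCond,
                                        const ICmpInst *Cond,
                                        const DataLayout &DL) {
  // If DomCond being true forces Cond to be true, the second test is
  // redundant on any path where DomCond is known to hold.
  Optional<bool> Implied =
      isImpliedCondition(DomCond, Cond, DL, /*InvertAPred=*/false,
                         /*Depth=*/0, /*AC=*/nullptr, /*CxtI=*/nullptr,
                         /*DT=*/nullptr);
  return Implied.hasValue() && *Implied;
}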