//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}
namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}
static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE) {
  return ::computeKnownBits(V, Depth,
                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}
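// Worked example for haveNoCommonBitsSet: with i8 operands where the LHS is
// known to match 0000XXXX (LHSKnown.Zero == 0xF0) and the RHS is known to
// match XXXX0000 (RHSKnown.Zero == 0x0F), the union of the known-zero masks
// is 0xFF, so no bit can be set in both values. A caller may then rewrite
// LHS + RHS as LHS | RHS, since no column of the addition can carry.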
bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}
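// Worked example for computeKnownBitsAddSub: if both operands of an add have
// their low two bits known zero (e.g. both are multiples of 4), then
// KnownBits::computeForAddSub reports the sum's low two bits as zero too,
// since no carry can be generated below bit 2.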
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                            Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  // -------
  //    XX11
  //   XX11
  //  XX11
  // XX11
  // -------
  // XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}
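// Worked example for computeKnownBitsFromRangeMetadata: a single i8 range
// [4, 8) has unsigned min 4 (0b00000100) and unsigned max 7 (0b00000111).
// Their XOR is 0b011, which has six leading zeros, so the top six bits form
// the common prefix: the value is known to be 000001XX, i.e.
// Known.One == 0x04 and Known.Zero == 0xF8.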
static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                                   return EphValues.count(U);
                                 })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}
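// Worked example for the assume(v & b = a) pattern above: given
// __builtin_assume((v & 0xFF00) == 0x1200) on an i16, MaskKnown.One is 0xFF00
// and A is fully known, so bits 8-15 of v become known: Known.One gains
// 0x1200 and Known.Zero gains 0xED00. Bits outside the mask stay unknown.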
/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functors that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
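// Worked example for computeKnownBitsFromShiftOperator with a variable shift:
// for shl i8 %x, %s where %x has its low four bits known zero and %s is known
// to be at most 3 (bits 2-7 of %s known zero), KZF produces (Zero << s) with
// the low s bits set for each s in {0,1,2,3}. The intersection of those four
// masks still covers the low four bits, so the result is known to keep four
// trailing zeros regardless of the exact shift amount.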
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                       m_Value(Y))) ||
         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                       m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
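  // Worked example for the udiv bound above: with i8 operands, if the
  // numerator has at least two leading zeros (value <= 63) and the
  // denominator has a known one at bit 2 (value >= 4), then
  // RHSMaxLeadingZeros is 5 and the quotient gets
  // LeadZ = 2 + 8 - 5 - 1 = 4 leading zero bits (quotient <= 15).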
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
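  // Worked example for the sext case above: for sext i8 %x to i32 with the
  // sign bit of %x known set, KnownBits::sext copies that bit upward, so bits
  // 7-31 of the result are known one; if the sign bit is known clear, they
  // are known zero instead.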
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }
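  // Worked example for urem by a power of two above: for %x urem 8, LowBits
  // is 7, so every bit above bit 2 becomes known zero and the low three bits
  // are copied from whatever is known about %x.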
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
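  // Worked example for the GEP case above: indexing into an array of i64
  // contributes an offset of 8 * Index, so even a completely unknown index
  // keeps countTrailingZeros(8) = 3 trailing zero bits in the offset;
  // combined with an aligned base pointer, the resulting pointer's low bits
  // are known zero.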
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value refers to ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
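  // Worked example for the recurrence handling above: for
  //   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  //   %i.next = add i32 %i, 2
  // both the start value 0 and the step 2 have at least one trailing zero,
  // so %i is known to have its low bit clear (it is always even).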
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      }
    }
    break;
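  // Worked example for the ctpop bound above: if at most 8 bits of an i32
  // operand can be set (countMaxPopulation() == 8), the population count is
  // at most 8, which fits in Log2_32(8) + 1 = 4 bits, so bits 4 and above of
  // the result are known zero.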
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              Known, Known2, Depth, Q);
          break;
        }
      }
    }
  }
}
/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
          V->getType()->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");
  assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
         "V and Known should have same BitWidth");
  (void)BitWidth;

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known.One = *C;
    Known.Zero = ~Known.One;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      APInt Elt = CDS->getElementAsAPInt(i);
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        Known.resetAll();
        return;
      }
      const APInt &Elt = ElementCI->getValue();
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }
1494 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1495 break;
1496 case Instruction::ExtractValue:
1497 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1498 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1499 if (EVI->getNumIndices() != 1) break;
1500 if (EVI->getIndices()[0] == 0) {
1501 switch (II->getIntrinsicID()) {
1502 default: break;
1503 case Intrinsic::uadd_with_overflow:
1504 case Intrinsic::sadd_with_overflow:
1505 computeKnownBitsAddSub(true, II->getArgOperand(0),
1506 II->getArgOperand(1), false, Known, Known2,
1507 Depth, Q);
1508 break;
1509 case Intrinsic::usub_with_overflow:
1510 case Intrinsic::ssub_with_overflow:
1511 computeKnownBitsAddSub(false, II->getArgOperand(0),
1512 II->getArgOperand(1), false, Known, Known2,
1513 Depth, Q);
1514 break;
1515 case Intrinsic::umul_with_overflow:
1516 case Intrinsic::smul_with_overflow:
1517 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1518 Known, Known2, Depth, Q);
1519 break;
1520 }
1521 }
1522 }
1523 }
1524 }
1525
1526 /// Determine which bits of V are known to be either zero or one and return
1527 /// them.
1528 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1529 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1530 computeKnownBits(V, Known, Depth, Q);
1531 return Known;
1532 }
1533
1534 /// Determine which bits of V are known to be either zero or one and return
1535 /// them in the Known bit set.
1536 ///
1537 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1538 /// we cannot optimize based on the assumption that it is zero without changing
1539 /// it to be an explicit zero. If we don't change it to zero, other code could be
1540 /// optimized based on the contradictory assumption that it is non-zero.
1541 /// Because instcombine aggressively folds operations with undef args anyway,
1542 /// this won't lose us code quality.
1543 ///
1544 /// This function is defined on values with integer type, values with pointer
1545 /// type, and vectors of integers. In the case where V is a vector, the known
1546 /// zero and known one values are the same width as the vector element, and
1547 /// the bit is set only if it is true for all of the elements in the
1548 /// vector.
1549 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1550 const Query &Q) {
1551 assert(V && "No Value?");
1552 assert(Depth <= MaxDepth && "Limit Search Depth");
1553 unsigned BitWidth = Known.getBitWidth();
1554
1555 assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1556 V->getType()->isPtrOrPtrVectorTy()) &&
1557 "Not integer or pointer type!");
1558 assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
1559 "V and Known should have same BitWidth");
1560 (void)BitWidth;
1561
1562 const APInt *C;
1563 if (match(V, m_APInt(C))) {
1564 // We know all of the bits for a scalar constant or a splat vector constant!
1565 Known.One = *C;
1566 Known.Zero = ~Known.One;
1567 return;
1568 }
1569 // Null and aggregate-zero are all-zeros.
1570 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1571 Known.setAllZero();
1572 return;
1573 }
1574 // Handle a constant vector by taking the intersection of the known bits of
1575 // each element.
1576 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1577 // We know that CDS must be a vector of integers. Take the intersection of
1578 // the known bits of each element.
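// For example, for <2 x i8> <i8 5, i8 7> the lanes are 0b00000101 and
// 0b00000111, so bits 0 and 2 are known one, bits 3-7 are known zero, and
// bit 1 is left unknown.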
1579 Known.Zero.setAllBits(); Known.One.setAllBits();
1580 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1581 APInt Elt = CDS->getElementAsAPInt(i);
1582 Known.Zero &= ~Elt;
1583 Known.One &= Elt;
1584 }
1585 return;
1586 }
1587
1588 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1589 // We know that CV must be a vector of integers. Take the intersection of
1590 // the known bits of each element.
1591 Known.Zero.setAllBits(); Known.One.setAllBits();
1592 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1593 Constant *Element = CV->getAggregateElement(i);
1594 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1595 if (!ElementCI) {
1596 Known.resetAll();
1597 return;
1598 }
1599 const APInt &Elt = ElementCI->getValue();
1600 Known.Zero &= ~Elt;
1601 Known.One &= Elt;
1602 }
1603 return;
1604 }
1605
1606 // Start out not knowing anything.
1607 Known.resetAll();
1608
1609 // We can't imply anything about undefs.
1610 if (isa<UndefValue>(V))
1611 return;
1612
1613 // There's no point in looking through other users of ConstantData for
1614 // assumptions. Confirm that we've handled them all.
1615 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1616
1617 // Limit search depth.
1618 // All recursive calls that increase depth must come after this.
1619 if (Depth == MaxDepth)
1620 return;
1621
1622 // An interposable GlobalAlias is totally unknown. A non-interposable
1623 // GlobalAlias has the bits of its aliasee.
1624 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1625 if (!GA->isInterposable())
1626 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1627 return;
1628 }
1629
1630 if (const Operator *I = dyn_cast<Operator>(V))
1631 computeKnownBitsFromOperator(I, Known, Depth, Q);
1632
1633 // Aligned pointers have trailing zeros, so refine the Known.Zero set.
1634 if (V->getType()->isPointerTy()) {
1635 unsigned Align = V->getPointerAlignment(Q.DL);
1636 if (Align)
1637 Known.Zero.setLowBits(countTrailingZeros(Align));
1638 }
1639
1640 // computeKnownBitsFromAssume strictly refines Known.
1641 // Therefore, we run it after computeKnownBitsFromOperator.
1642
1643 // Check whether a nearby assume intrinsic can determine some known bits.
1644 computeKnownBitsFromAssume(V, Known, Depth, Q);
1645
1646 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1647 }
1648
1649 /// Return true if the given value is known to have exactly one
1650 /// bit set when defined. For vectors, return true if every element is known
1651 /// to be a power of two when defined. Supports values with integer or pointer
1652 /// types and vectors of integers.
1653 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1654 const Query &Q) {
1655 assert(Depth <= MaxDepth && "Limit Search Depth");
1656
1657 // Attempt to match against constants.
1658 if (OrZero && match(V, m_Power2OrZero()))
1659 return true;
1660 if (match(V, m_Power2()))
1661 return true;
1662
1663 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1664 // it is shifted off the end then the result is undefined.
1665 if (match(V, m_Shl(m_One(), m_Value())))
1666 return true;
1667
1668 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1669 // the bottom. If it is shifted off the bottom then the result is undefined.
1670 if (match(V, m_LShr(m_SignMask(), m_Value())))
1671 return true;
1672
1673 // The remaining tests are all recursive, so bail out if we hit the limit.
1674 if (Depth++ == MaxDepth) 1675 return false; 1676 1677 Value *X = nullptr, *Y = nullptr; 1678 // A shift left or a logical shift right of a power of two is a power of two 1679 // or zero. 1680 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 1681 match(V, m_LShr(m_Value(X), m_Value())))) 1682 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 1683 1684 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 1685 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 1686 1687 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 1688 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 1689 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 1690 1691 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 1692 // A power of two and'd with anything is a power of two or zero. 1693 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 1694 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 1695 return true; 1696 // X & (-X) is always a power of two or zero. 1697 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 1698 return true; 1699 return false; 1700 } 1701 1702 // Adding a power-of-two or zero to the same power-of-two or zero yields 1703 // either the original power-of-two, a larger power-of-two or zero. 1704 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1705 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 1706 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) { 1707 if (match(X, m_And(m_Specific(Y), m_Value())) || 1708 match(X, m_And(m_Value(), m_Specific(Y)))) 1709 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 1710 return true; 1711 if (match(Y, m_And(m_Specific(X), m_Value())) || 1712 match(Y, m_And(m_Value(), m_Specific(X)))) 1713 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 1714 return true; 1715 1716 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 1717 KnownBits LHSBits(BitWidth); 1718 computeKnownBits(X, LHSBits, Depth, Q); 1719 1720 KnownBits RHSBits(BitWidth); 1721 computeKnownBits(Y, RHSBits, Depth, Q); 1722 // If i8 V is a power of two or zero: 1723 // ZeroBits: 1 1 1 0 1 1 1 1 1724 // ~ZeroBits: 0 0 0 1 0 0 0 0 1725 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) 1726 // If OrZero isn't set, we cannot give back a zero result. 1727 // Make sure either the LHS or RHS has a bit set. 1728 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) 1729 return true; 1730 } 1731 } 1732 1733 // An exact divide or right shift can only shift off zero bits, so the result 1734 // is a power of two only if the first operand is a power of two and not 1735 // copying a sign bit (sdiv int_min, 2). 1736 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 1737 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 1738 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 1739 Depth, Q); 1740 } 1741 1742 return false; 1743 } 1744 1745 /// \brief Test whether a GEP's result is known to be non-null. 1746 /// 1747 /// Uses properties inherent in a GEP to try to determine whether it is known 1748 /// to be non-null. 1749 /// 1750 /// Currently this routine does not support vector GEPs. 1751 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, 1752 const Query &Q) { 1753 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0) 1754 return false; 1755 1756 // FIXME: Support vector-GEPs. 
1757 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 1758 1759 // If the base pointer is non-null, we cannot walk to a null address with an 1760 // inbounds GEP in address space zero. 1761 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) 1762 return true; 1763 1764 // Walk the GEP operands and see if any operand introduces a non-zero offset. 1765 // If so, then the GEP cannot produce a null pointer, as doing so would 1766 // inherently violate the inbounds contract within address space zero. 1767 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 1768 GTI != GTE; ++GTI) { 1769 // Struct types are easy -- they must always be indexed by a constant. 1770 if (StructType *STy = GTI.getStructTypeOrNull()) { 1771 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); 1772 unsigned ElementIdx = OpC->getZExtValue(); 1773 const StructLayout *SL = Q.DL.getStructLayout(STy); 1774 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); 1775 if (ElementOffset > 0) 1776 return true; 1777 continue; 1778 } 1779 1780 // If we have a zero-sized type, the index doesn't matter. Keep looping. 1781 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0) 1782 continue; 1783 1784 // Fast path the constant operand case both for efficiency and so we don't 1785 // increment Depth when just zipping down an all-constant GEP. 1786 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { 1787 if (!OpC->isZero()) 1788 return true; 1789 continue; 1790 } 1791 1792 // We post-increment Depth here because while isKnownNonZero increments it 1793 // as well, when we pop back up that increment won't persist. We don't want 1794 // to recurse 10k times just because we have 10k GEP operands. We don't 1795 // bail completely out because we want to handle constant GEPs regardless 1796 // of depth. 1797 if (Depth++ >= MaxDepth) 1798 continue; 1799 1800 if (isKnownNonZero(GTI.getOperand(), Depth, Q)) 1801 return true; 1802 } 1803 1804 return false; 1805 } 1806 1807 static bool isKnownNonNullFromDominatingCondition(const Value *V, 1808 const Instruction *CtxI, 1809 const DominatorTree *DT) { 1810 assert(V->getType()->isPointerTy() && "V must be pointer type"); 1811 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull"); 1812 1813 if (!CtxI || !DT) 1814 return false; 1815 1816 unsigned NumUsesExplored = 0; 1817 for (auto *U : V->users()) { 1818 // Avoid massive lists 1819 if (NumUsesExplored >= DomConditionsMaxUses) 1820 break; 1821 NumUsesExplored++; 1822 1823 // If the value is used as an argument to a call or invoke, then argument 1824 // attributes may provide an answer about null-ness. 1825 if (auto CS = ImmutableCallSite(U)) 1826 if (auto *CalledFunc = CS.getCalledFunction()) 1827 for (const Argument &Arg : CalledFunc->args()) 1828 if (CS.getArgOperand(Arg.getArgNo()) == V && 1829 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI)) 1830 return true; 1831 1832 // Consider only compare instructions uniquely controlling a branch 1833 CmpInst::Predicate Pred; 1834 if (!match(const_cast<User *>(U), 1835 m_c_ICmp(Pred, m_Specific(V), m_Zero())) || 1836 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)) 1837 continue; 1838 1839 for (auto *CmpU : U->users()) { 1840 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) { 1841 assert(BI->isConditional() && "uses a comparison!"); 1842 1843 BasicBlock *NonNullSuccessor = 1844 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 
1 : 0);
1845 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1846 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1847 return true;
1848 } else if (Pred == ICmpInst::ICMP_NE &&
1849 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1850 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
1851 return true;
1852 }
1853 }
1854 }
1855
1856 return false;
1857 }
1858
1859 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1860 /// ensure that the value it's attached to is never Value? 'RangeType' is
1861 /// the type of the value described by the range.
1862 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1863 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1864 assert(NumRanges >= 1);
1865 for (unsigned i = 0; i < NumRanges; ++i) {
1866 ConstantInt *Lower =
1867 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1868 ConstantInt *Upper =
1869 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1870 ConstantRange Range(Lower->getValue(), Upper->getValue());
1871 if (Range.contains(Value))
1872 return false;
1873 }
1874 return true;
1875 }
1876
1877 /// Return true if the given value is known to be non-zero when defined. For
1878 /// vectors, return true if every element is known to be non-zero when
1879 /// defined. For pointers, if the context instruction and dominator tree are
1880 /// specified, perform context-sensitive analysis and return true if the
1881 /// pointer couldn't possibly be null at the specified instruction.
1882 /// Supports values with integer or pointer type and vectors of integers.
1883 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1884 if (auto *C = dyn_cast<Constant>(V)) {
1885 if (C->isNullValue())
1886 return false;
1887 if (isa<ConstantInt>(C))
1888 // Must be non-zero due to null test above.
1889 return true;
1890
1891 // For constant vectors, check that all elements are undefined or known
1892 // non-zero to determine that the whole vector is known non-zero.
1893 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1894 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1895 Constant *Elt = C->getAggregateElement(i);
1896 if (!Elt || Elt->isNullValue())
1897 return false;
1898 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1899 return false;
1900 }
1901 return true;
1902 }
1903
1904 // A global variable in address space 0 is non-null unless it is extern
1905 // weak or an absolute symbol reference. Other address spaces may have
1906 // null as a valid address for a global, so we can't assume anything.
1907 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1908 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
1909 GV->getType()->getAddressSpace() == 0)
1910 return true;
1911 } else
1912 return false;
1913 }
1914
1915 if (auto *I = dyn_cast<Instruction>(V)) {
1916 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1917 // If the possible ranges don't contain zero, then the value is
1918 // definitely non-zero.
1919 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1920 const APInt ZeroValue(Ty->getBitWidth(), 0);
1921 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1922 return true;
1923 }
1924 }
1925 }
1926
1927 // Check for pointer simplifications.
1928 if (V->getType()->isPointerTy()) {
1929 // Alloca never returns null, malloc might.
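// (This relies on the default alloca address space, where null is never a
// valid object address; other address spaces make no such guarantee.)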
1930 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0) 1931 return true; 1932 1933 // A byval, inalloca, or nonnull argument is never null. 1934 if (const Argument *A = dyn_cast<Argument>(V)) 1935 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr()) 1936 return true; 1937 1938 // A Load tagged with nonnull metadata is never null. 1939 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 1940 if (LI->getMetadata(LLVMContext::MD_nonnull)) 1941 return true; 1942 1943 if (auto CS = ImmutableCallSite(V)) 1944 if (CS.isReturnNonNull()) 1945 return true; 1946 } 1947 1948 // The remaining tests are all recursive, so bail out if we hit the limit. 1949 if (Depth++ >= MaxDepth) 1950 return false; 1951 1952 // Check for recursive pointer simplifications. 1953 if (V->getType()->isPointerTy()) { 1954 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) 1955 return true; 1956 1957 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 1958 if (isGEPKnownNonNull(GEP, Depth, Q)) 1959 return true; 1960 } 1961 1962 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); 1963 1964 // X | Y != 0 if X != 0 or Y != 0. 1965 Value *X = nullptr, *Y = nullptr; 1966 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 1967 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q); 1968 1969 // ext X != 0 if X != 0. 1970 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 1971 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); 1972 1973 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 1974 // if the lowest bit is shifted off the end. 1975 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) { 1976 // shl nuw can't remove any non-zero bits. 1977 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 1978 if (BO->hasNoUnsignedWrap()) 1979 return isKnownNonZero(X, Depth, Q); 1980 1981 KnownBits Known(BitWidth); 1982 computeKnownBits(X, Known, Depth, Q); 1983 if (Known.One[0]) 1984 return true; 1985 } 1986 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 1987 // defined if the sign bit is shifted off the end. 1988 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 1989 // shr exact can only shift out zero bits. 1990 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 1991 if (BO->isExact()) 1992 return isKnownNonZero(X, Depth, Q); 1993 1994 KnownBits Known = computeKnownBits(X, Depth, Q); 1995 if (Known.isNegative()) 1996 return true; 1997 1998 // If the shifter operand is a constant, and all of the bits shifted 1999 // out are known to be zero, and X is known non-zero then at least one 2000 // non-zero bit must remain. 2001 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 2002 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 2003 // Is there a known one in the portion not shifted out? 2004 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal) 2005 return true; 2006 // Are all the bits to be shifted out known zero? 2007 if (Known.countMinTrailingZeros() >= ShiftVal) 2008 return isKnownNonZero(X, Depth, Q); 2009 } 2010 } 2011 // div exact can only produce a zero if the dividend is zero. 2012 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 2013 return isKnownNonZero(X, Depth, Q); 2014 } 2015 // X + Y. 
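// Note that the sum of two non-zero values can still wrap to zero
// (e.g. X + (-X)), so the sign information gathered below is what makes
// these cases sound.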
2016 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2017 KnownBits XKnown = computeKnownBits(X, Depth, Q); 2018 KnownBits YKnown = computeKnownBits(Y, Depth, Q); 2019 2020 // If X and Y are both non-negative (as signed values) then their sum is not 2021 // zero unless both X and Y are zero. 2022 if (XKnown.isNonNegative() && YKnown.isNonNegative()) 2023 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q)) 2024 return true; 2025 2026 // If X and Y are both negative (as signed values) then their sum is not 2027 // zero unless both X and Y equal INT_MIN. 2028 if (XKnown.isNegative() && YKnown.isNegative()) { 2029 APInt Mask = APInt::getSignedMaxValue(BitWidth); 2030 // The sign bit of X is set. If some other bit is set then X is not equal 2031 // to INT_MIN. 2032 if (XKnown.One.intersects(Mask)) 2033 return true; 2034 // The sign bit of Y is set. If some other bit is set then Y is not equal 2035 // to INT_MIN. 2036 if (YKnown.One.intersects(Mask)) 2037 return true; 2038 } 2039 2040 // The sum of a non-negative number and a power of two is not zero. 2041 if (XKnown.isNonNegative() && 2042 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) 2043 return true; 2044 if (YKnown.isNonNegative() && 2045 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) 2046 return true; 2047 } 2048 // X * Y. 2049 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 2050 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2051 // If X and Y are non-zero then so is X * Y as long as the multiplication 2052 // does not overflow. 2053 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) && 2054 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q)) 2055 return true; 2056 } 2057 // (C ? X : Y) != 0 if X != 0 and Y != 0. 2058 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 2059 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) && 2060 isKnownNonZero(SI->getFalseValue(), Depth, Q)) 2061 return true; 2062 } 2063 // PHI 2064 else if (const PHINode *PN = dyn_cast<PHINode>(V)) { 2065 // Try and detect a recurrence that monotonically increases from a 2066 // starting value, as these are common as induction variables. 2067 if (PN->getNumIncomingValues() == 2) { 2068 Value *Start = PN->getIncomingValue(0); 2069 Value *Induction = PN->getIncomingValue(1); 2070 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start)) 2071 std::swap(Start, Induction); 2072 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) { 2073 if (!C->isZero() && !C->isNegative()) { 2074 ConstantInt *X; 2075 if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) || 2076 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) && 2077 !X->isNegative()) 2078 return true; 2079 } 2080 } 2081 } 2082 // Check if all incoming values are non-zero constant. 2083 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) { 2084 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero(); 2085 }); 2086 if (AllNonZeroConstants) 2087 return true; 2088 } 2089 2090 KnownBits Known(BitWidth); 2091 computeKnownBits(V, Known, Depth, Q); 2092 return Known.One != 0; 2093 } 2094 2095 /// Return true if V2 == V1 + X, where X is known non-zero. 
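/// In two's complement arithmetic, adding a known non-zero X always changes
/// V1 (even when the add wraps), so no nsw/nuw flags are required here.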
2096 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { 2097 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 2098 if (!BO || BO->getOpcode() != Instruction::Add) 2099 return false; 2100 Value *Op = nullptr; 2101 if (V2 == BO->getOperand(0)) 2102 Op = BO->getOperand(1); 2103 else if (V2 == BO->getOperand(1)) 2104 Op = BO->getOperand(0); 2105 else 2106 return false; 2107 return isKnownNonZero(Op, 0, Q); 2108 } 2109 2110 /// Return true if it is known that V1 != V2. 2111 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) { 2112 if (V1 == V2) 2113 return false; 2114 if (V1->getType() != V2->getType()) 2115 // We can't look through casts yet. 2116 return false; 2117 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q)) 2118 return true; 2119 2120 if (V1->getType()->isIntOrIntVectorTy()) { 2121 // Are any known bits in V1 contradictory to known bits in V2? If V1 2122 // has a known zero where V2 has a known one, they must not be equal. 2123 KnownBits Known1 = computeKnownBits(V1, 0, Q); 2124 KnownBits Known2 = computeKnownBits(V2, 0, Q); 2125 2126 if (Known1.Zero.intersects(Known2.One) || 2127 Known2.Zero.intersects(Known1.One)) 2128 return true; 2129 } 2130 return false; 2131 } 2132 2133 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 2134 /// simplify operations downstream. Mask is known to be zero for bits that V 2135 /// cannot have. 2136 /// 2137 /// This function is defined on values with integer type, values with pointer 2138 /// type, and vectors of integers. In the case 2139 /// where V is a vector, the mask, known zero, and known one values are the 2140 /// same width as the vector element, and the bit is set only if it is true 2141 /// for all of the elements in the vector. 2142 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, 2143 const Query &Q) { 2144 KnownBits Known(Mask.getBitWidth()); 2145 computeKnownBits(V, Known, Depth, Q); 2146 return Mask.isSubsetOf(Known.Zero); 2147 } 2148 2149 /// For vector constants, loop over the elements and find the constant with the 2150 /// minimum number of sign bits. Return 0 if the value is not a vector constant 2151 /// or if any element was not analyzed; otherwise, return the count for the 2152 /// element with the minimum number of sign bits. 2153 static unsigned computeNumSignBitsVectorConstant(const Value *V, 2154 unsigned TyBits) { 2155 const auto *CV = dyn_cast<Constant>(V); 2156 if (!CV || !CV->getType()->isVectorTy()) 2157 return 0; 2158 2159 unsigned MinSignBits = TyBits; 2160 unsigned NumElts = CV->getType()->getVectorNumElements(); 2161 for (unsigned i = 0; i != NumElts; ++i) { 2162 // If we find a non-ConstantInt, bail out. 2163 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); 2164 if (!Elt) 2165 return 0; 2166 2167 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); 2168 } 2169 2170 return MinSignBits; 2171 } 2172 2173 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, 2174 const Query &Q); 2175 2176 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth, 2177 const Query &Q) { 2178 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q); 2179 assert(Result > 0 && "At least one sign bit needs to be present!"); 2180 return Result; 2181 } 2182 2183 /// Return the number of times the sign bit of the register is replicated into 2184 /// the other bits. 
We know that at least 1 bit is always equal to the sign bit
2185 /// (itself), but other cases can give us information. For example, immediately
2186 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2187 /// other, so we return 3. For vectors, return the number of sign bits for the
2188 /// vector element with the minimum number of known sign bits.
2189 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2190 const Query &Q) {
2191 assert(Depth <= MaxDepth && "Limit Search Depth");
2192
2193 // We return the minimum number of sign bits that are guaranteed to be present
2194 // in V, so for undef we have to conservatively return 1. We don't have the
2195 // same behavior for poison though -- that's a FIXME today.
2196
2197 unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2198 unsigned Tmp, Tmp2;
2199 unsigned FirstAnswer = 1;
2200
2201 // Note that ConstantInt is handled by the general computeKnownBits case
2202 // below.
2203
2204 if (Depth == MaxDepth)
2205 return 1; // Limit search depth.
2206
2207 const Operator *U = dyn_cast<Operator>(V);
2208 switch (Operator::getOpcode(V)) {
2209 default: break;
2210 case Instruction::SExt:
2211 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2212 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2213
2214 case Instruction::SDiv: {
2215 const APInt *Denominator;
2216 // sdiv X, C -> adds log(C) sign bits.
2217 if (match(U->getOperand(1), m_APInt(Denominator))) {
2218
2219 // Ignore non-positive denominator.
2220 if (!Denominator->isStrictlyPositive())
2221 break;
2222
2223 // Calculate the incoming numerator bits.
2224 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2225
2226 // Add floor(log(C)) bits to the numerator bits.
2227 return std::min(TyBits, NumBits + Denominator->logBase2());
2228 }
2229 break;
2230 }
2231
2232 case Instruction::SRem: {
2233 const APInt *Denominator;
2234 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2235 // positive constant. This lets us put a lower bound on the number of sign
2236 // bits.
2237 if (match(U->getOperand(1), m_APInt(Denominator))) {
2238
2239 // Ignore non-positive denominator.
2240 if (!Denominator->isStrictlyPositive())
2241 break;
2242
2243 // Calculate the incoming numerator bits. SRem by a positive constant
2244 // can't lower the number of sign bits.
2245 unsigned NumrBits =
2246 ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2247
2248 // Calculate the leading sign bit constraints by examining the
2249 // denominator. Given that the denominator is positive, there are two
2250 // cases:
2251 //
2252 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2253 // (1 << ceilLogBase2(C)).
2254 //
2255 // 2. the numerator is negative. Then the result range is (-C,0] and
2256 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2257 //
2258 // Thus a lower bound on the number of sign bits is `TyBits -
2259 // ceilLogBase2(C)`.
2260
2261 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2262 return std::max(NumrBits, ResBits);
2263 }
2264 break;
2265 }
2266
2267 case Instruction::AShr: {
2268 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2269 // ashr X, C -> adds C sign bits. Vectors too.
2270 const APInt *ShAmt;
2271 if (match(U->getOperand(1), m_APInt(ShAmt))) {
2272 if (ShAmt->uge(TyBits))
2273 break; // Bad shift.
2274 unsigned ShAmtLimited = ShAmt->getZExtValue(); 2275 Tmp += ShAmtLimited; 2276 if (Tmp > TyBits) Tmp = TyBits; 2277 } 2278 return Tmp; 2279 } 2280 case Instruction::Shl: { 2281 const APInt *ShAmt; 2282 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2283 // shl destroys sign bits. 2284 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2285 if (ShAmt->uge(TyBits) || // Bad shift. 2286 ShAmt->uge(Tmp)) break; // Shifted all sign bits out. 2287 Tmp2 = ShAmt->getZExtValue(); 2288 return Tmp - Tmp2; 2289 } 2290 break; 2291 } 2292 case Instruction::And: 2293 case Instruction::Or: 2294 case Instruction::Xor: // NOT is handled here. 2295 // Logical binary ops preserve the number of sign bits at the worst. 2296 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2297 if (Tmp != 1) { 2298 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2299 FirstAnswer = std::min(Tmp, Tmp2); 2300 // We computed what we know about the sign bits as our first 2301 // answer. Now proceed to the generic code that uses 2302 // computeKnownBits, and pick whichever answer is better. 2303 } 2304 break; 2305 2306 case Instruction::Select: 2307 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2308 if (Tmp == 1) return 1; // Early out. 2309 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 2310 return std::min(Tmp, Tmp2); 2311 2312 case Instruction::Add: 2313 // Add can have at most one carry bit. Thus we know that the output 2314 // is, at worst, one more bit than the inputs. 2315 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2316 if (Tmp == 1) return 1; // Early out. 2317 2318 // Special case decrementing a value (ADD X, -1): 2319 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 2320 if (CRHS->isAllOnesValue()) { 2321 KnownBits Known(TyBits); 2322 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 2323 2324 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2325 // sign bits set. 2326 if ((Known.Zero | 1).isAllOnesValue()) 2327 return TyBits; 2328 2329 // If we are subtracting one from a positive number, there is no carry 2330 // out of the result. 2331 if (Known.isNonNegative()) 2332 return Tmp; 2333 } 2334 2335 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2336 if (Tmp2 == 1) return 1; 2337 return std::min(Tmp, Tmp2)-1; 2338 2339 case Instruction::Sub: 2340 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2341 if (Tmp2 == 1) return 1; 2342 2343 // Handle NEG. 2344 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2345 if (CLHS->isNullValue()) { 2346 KnownBits Known(TyBits); 2347 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 2348 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2349 // sign bits set. 2350 if ((Known.Zero | 1).isAllOnesValue()) 2351 return TyBits; 2352 2353 // If the input is known to be positive (the sign bit is known clear), 2354 // the output of the NEG has the same number of sign bits as the input. 2355 if (Known.isNonNegative()) 2356 return Tmp2; 2357 2358 // Otherwise, we treat this like a SUB. 2359 } 2360 2361 // Sub can have at most one carry bit. Thus we know that the output 2362 // is, at worst, one more bit than the inputs. 2363 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2364 if (Tmp == 1) return 1; // Early out. 2365 return std::min(Tmp, Tmp2)-1; 2366 2367 case Instruction::Mul: { 2368 // The output of the Mul can be at most twice the valid bits in the inputs. 
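// For example, multiplying two i16 values that each have 9 known sign bits
// (so at most 8 significant value bits) needs up to 16 value bits, leaving
// only the 1 sign bit we can always assume.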
2369 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2370 if (SignBitsOp0 == 1) return 1; // Early out. 2371 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2372 if (SignBitsOp1 == 1) return 1; 2373 unsigned OutValidBits = 2374 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); 2375 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; 2376 } 2377 2378 case Instruction::PHI: { 2379 const PHINode *PN = cast<PHINode>(U); 2380 unsigned NumIncomingValues = PN->getNumIncomingValues(); 2381 // Don't analyze large in-degree PHIs. 2382 if (NumIncomingValues > 4) break; 2383 // Unreachable blocks may have zero-operand PHI nodes. 2384 if (NumIncomingValues == 0) break; 2385 2386 // Take the minimum of all incoming values. This can't infinitely loop 2387 // because of our depth threshold. 2388 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q); 2389 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { 2390 if (Tmp == 1) return Tmp; 2391 Tmp = std::min( 2392 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q)); 2393 } 2394 return Tmp; 2395 } 2396 2397 case Instruction::Trunc: 2398 // FIXME: it's tricky to do anything useful for this, but it is an important 2399 // case for targets like X86. 2400 break; 2401 2402 case Instruction::ExtractElement: 2403 // Look through extract element. At the moment we keep this simple and skip 2404 // tracking the specific element. But at least we might find information 2405 // valid for all elements of the vector (for example if vector is sign 2406 // extended, shifted, etc). 2407 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2408 } 2409 2410 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2411 // use this information. 2412 2413 // If we can examine all elements of a vector constant successfully, we're 2414 // done (we can't do any better than that). If not, keep trying. 2415 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits)) 2416 return VecSignBits; 2417 2418 KnownBits Known(TyBits); 2419 computeKnownBits(V, Known, Depth, Q); 2420 2421 // If we know that the sign bit is either zero or one, determine the number of 2422 // identical bits in the top of the input value. 2423 return std::max(FirstAnswer, Known.countMinSignBits()); 2424 } 2425 2426 /// This function computes the integer multiple of Base that equals V. 2427 /// If successful, it returns true and returns the multiple in 2428 /// Multiple. If unsuccessful, it returns false. It looks 2429 /// through SExt instructions only if LookThroughSExt is true. 2430 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 2431 bool LookThroughSExt, unsigned Depth) { 2432 const unsigned MaxDepth = 6; 2433 2434 assert(V && "No Value?"); 2435 assert(Depth <= MaxDepth && "Limit Search Depth"); 2436 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); 2437 2438 Type *T = V->getType(); 2439 2440 ConstantInt *CI = dyn_cast<ConstantInt>(V); 2441 2442 if (Base == 0) 2443 return false; 2444 2445 if (Base == 1) { 2446 Multiple = V; 2447 return true; 2448 } 2449 2450 ConstantExpr *CO = dyn_cast<ConstantExpr>(V); 2451 Constant *BaseVal = ConstantInt::get(T, Base); 2452 if (CO && CO == BaseVal) { 2453 // Multiple is 1. 
2454 Multiple = ConstantInt::get(T, 1); 2455 return true; 2456 } 2457 2458 if (CI && CI->getZExtValue() % Base == 0) { 2459 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 2460 return true; 2461 } 2462 2463 if (Depth == MaxDepth) return false; // Limit search depth. 2464 2465 Operator *I = dyn_cast<Operator>(V); 2466 if (!I) return false; 2467 2468 switch (I->getOpcode()) { 2469 default: break; 2470 case Instruction::SExt: 2471 if (!LookThroughSExt) return false; 2472 // otherwise fall through to ZExt 2473 LLVM_FALLTHROUGH; 2474 case Instruction::ZExt: 2475 return ComputeMultiple(I->getOperand(0), Base, Multiple, 2476 LookThroughSExt, Depth+1); 2477 case Instruction::Shl: 2478 case Instruction::Mul: { 2479 Value *Op0 = I->getOperand(0); 2480 Value *Op1 = I->getOperand(1); 2481 2482 if (I->getOpcode() == Instruction::Shl) { 2483 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 2484 if (!Op1CI) return false; 2485 // Turn Op0 << Op1 into Op0 * 2^Op1 2486 APInt Op1Int = Op1CI->getValue(); 2487 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 2488 APInt API(Op1Int.getBitWidth(), 0); 2489 API.setBit(BitToSet); 2490 Op1 = ConstantInt::get(V->getContext(), API); 2491 } 2492 2493 Value *Mul0 = nullptr; 2494 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 2495 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 2496 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 2497 if (Op1C->getType()->getPrimitiveSizeInBits() < 2498 MulC->getType()->getPrimitiveSizeInBits()) 2499 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 2500 if (Op1C->getType()->getPrimitiveSizeInBits() > 2501 MulC->getType()->getPrimitiveSizeInBits()) 2502 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 2503 2504 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 2505 Multiple = ConstantExpr::getMul(MulC, Op1C); 2506 return true; 2507 } 2508 2509 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 2510 if (Mul0CI->getValue() == 1) { 2511 // V == Base * Op1, so return Op1 2512 Multiple = Op1; 2513 return true; 2514 } 2515 } 2516 2517 Value *Mul1 = nullptr; 2518 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 2519 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 2520 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 2521 if (Op0C->getType()->getPrimitiveSizeInBits() < 2522 MulC->getType()->getPrimitiveSizeInBits()) 2523 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 2524 if (Op0C->getType()->getPrimitiveSizeInBits() > 2525 MulC->getType()->getPrimitiveSizeInBits()) 2526 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 2527 2528 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 2529 Multiple = ConstantExpr::getMul(MulC, Op0C); 2530 return true; 2531 } 2532 2533 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) 2534 if (Mul1CI->getValue() == 1) { 2535 // V == Base * Op0, so return Op0 2536 Multiple = Op0; 2537 return true; 2538 } 2539 } 2540 } 2541 } 2542 2543 // We could not determine if V is a multiple of Base. 
2544 return false; 2545 } 2546 2547 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS, 2548 const TargetLibraryInfo *TLI) { 2549 const Function *F = ICS.getCalledFunction(); 2550 if (!F) 2551 return Intrinsic::not_intrinsic; 2552 2553 if (F->isIntrinsic()) 2554 return F->getIntrinsicID(); 2555 2556 if (!TLI) 2557 return Intrinsic::not_intrinsic; 2558 2559 LibFunc Func; 2560 // We're going to make assumptions on the semantics of the functions, check 2561 // that the target knows that it's available in this environment and it does 2562 // not have local linkage. 2563 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func)) 2564 return Intrinsic::not_intrinsic; 2565 2566 if (!ICS.onlyReadsMemory()) 2567 return Intrinsic::not_intrinsic; 2568 2569 // Otherwise check if we have a call to a function that can be turned into a 2570 // vector intrinsic. 2571 switch (Func) { 2572 default: 2573 break; 2574 case LibFunc_sin: 2575 case LibFunc_sinf: 2576 case LibFunc_sinl: 2577 return Intrinsic::sin; 2578 case LibFunc_cos: 2579 case LibFunc_cosf: 2580 case LibFunc_cosl: 2581 return Intrinsic::cos; 2582 case LibFunc_exp: 2583 case LibFunc_expf: 2584 case LibFunc_expl: 2585 return Intrinsic::exp; 2586 case LibFunc_exp2: 2587 case LibFunc_exp2f: 2588 case LibFunc_exp2l: 2589 return Intrinsic::exp2; 2590 case LibFunc_log: 2591 case LibFunc_logf: 2592 case LibFunc_logl: 2593 return Intrinsic::log; 2594 case LibFunc_log10: 2595 case LibFunc_log10f: 2596 case LibFunc_log10l: 2597 return Intrinsic::log10; 2598 case LibFunc_log2: 2599 case LibFunc_log2f: 2600 case LibFunc_log2l: 2601 return Intrinsic::log2; 2602 case LibFunc_fabs: 2603 case LibFunc_fabsf: 2604 case LibFunc_fabsl: 2605 return Intrinsic::fabs; 2606 case LibFunc_fmin: 2607 case LibFunc_fminf: 2608 case LibFunc_fminl: 2609 return Intrinsic::minnum; 2610 case LibFunc_fmax: 2611 case LibFunc_fmaxf: 2612 case LibFunc_fmaxl: 2613 return Intrinsic::maxnum; 2614 case LibFunc_copysign: 2615 case LibFunc_copysignf: 2616 case LibFunc_copysignl: 2617 return Intrinsic::copysign; 2618 case LibFunc_floor: 2619 case LibFunc_floorf: 2620 case LibFunc_floorl: 2621 return Intrinsic::floor; 2622 case LibFunc_ceil: 2623 case LibFunc_ceilf: 2624 case LibFunc_ceill: 2625 return Intrinsic::ceil; 2626 case LibFunc_trunc: 2627 case LibFunc_truncf: 2628 case LibFunc_truncl: 2629 return Intrinsic::trunc; 2630 case LibFunc_rint: 2631 case LibFunc_rintf: 2632 case LibFunc_rintl: 2633 return Intrinsic::rint; 2634 case LibFunc_nearbyint: 2635 case LibFunc_nearbyintf: 2636 case LibFunc_nearbyintl: 2637 return Intrinsic::nearbyint; 2638 case LibFunc_round: 2639 case LibFunc_roundf: 2640 case LibFunc_roundl: 2641 return Intrinsic::round; 2642 case LibFunc_pow: 2643 case LibFunc_powf: 2644 case LibFunc_powl: 2645 return Intrinsic::pow; 2646 case LibFunc_sqrt: 2647 case LibFunc_sqrtf: 2648 case LibFunc_sqrtl: 2649 return Intrinsic::sqrt; 2650 } 2651 2652 return Intrinsic::not_intrinsic; 2653 } 2654 2655 /// Return true if we can prove that the specified FP value is never equal to 2656 /// -0.0. 2657 /// 2658 /// NOTE: this function will need to be revisited when we support non-default 2659 /// rounding modes! 2660 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, 2661 unsigned Depth) { 2662 if (auto *CFP = dyn_cast<ConstantFP>(V)) 2663 return !CFP->getValueAPF().isNegZero(); 2664 2665 // Limit search depth. 
2666 if (Depth == MaxDepth) 2667 return false; 2668 2669 auto *Op = dyn_cast<Operator>(V); 2670 if (!Op) 2671 return false; 2672 2673 // Check if the nsz fast-math flag is set. 2674 if (auto *FPO = dyn_cast<FPMathOperator>(Op)) 2675 if (FPO->hasNoSignedZeros()) 2676 return true; 2677 2678 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. 2679 if (match(Op, m_FAdd(m_Value(), m_Zero()))) 2680 return true; 2681 2682 // sitofp and uitofp turn into +0.0 for zero. 2683 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) 2684 return true; 2685 2686 if (auto *Call = dyn_cast<CallInst>(Op)) { 2687 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI); 2688 switch (IID) { 2689 default: 2690 break; 2691 // sqrt(-0.0) = -0.0, no other negative results are possible. 2692 case Intrinsic::sqrt: 2693 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); 2694 // fabs(x) != -0.0 2695 case Intrinsic::fabs: 2696 return true; 2697 } 2698 } 2699 2700 return false; 2701 } 2702 2703 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a 2704 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign 2705 /// bit despite comparing equal. 2706 static bool cannotBeOrderedLessThanZeroImpl(const Value *V, 2707 const TargetLibraryInfo *TLI, 2708 bool SignBitOnly, 2709 unsigned Depth) { 2710 // TODO: This function does not do the right thing when SignBitOnly is true 2711 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform 2712 // which flips the sign bits of NaNs. See 2713 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2714 2715 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 2716 return !CFP->getValueAPF().isNegative() || 2717 (!SignBitOnly && CFP->getValueAPF().isZero()); 2718 } 2719 2720 if (Depth == MaxDepth) 2721 return false; // Limit search depth. 2722 2723 const Operator *I = dyn_cast<Operator>(V); 2724 if (!I) 2725 return false; 2726 2727 switch (I->getOpcode()) { 2728 default: 2729 break; 2730 // Unsigned integers are always nonnegative. 2731 case Instruction::UIToFP: 2732 return true; 2733 case Instruction::FMul: 2734 // x*x is always non-negative or a NaN. 2735 if (I->getOperand(0) == I->getOperand(1) && 2736 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 2737 return true; 2738 2739 LLVM_FALLTHROUGH; 2740 case Instruction::FAdd: 2741 case Instruction::FDiv: 2742 case Instruction::FRem: 2743 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2744 Depth + 1) && 2745 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2746 Depth + 1); 2747 case Instruction::Select: 2748 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2749 Depth + 1) && 2750 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2751 Depth + 1); 2752 case Instruction::FPExt: 2753 case Instruction::FPTrunc: 2754 // Widening/narrowing never change sign. 
2755 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2756 Depth + 1); 2757 case Instruction::Call: 2758 const auto *CI = cast<CallInst>(I); 2759 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI); 2760 switch (IID) { 2761 default: 2762 break; 2763 case Intrinsic::maxnum: 2764 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2765 Depth + 1) || 2766 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2767 Depth + 1); 2768 case Intrinsic::minnum: 2769 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2770 Depth + 1) && 2771 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2772 Depth + 1); 2773 case Intrinsic::exp: 2774 case Intrinsic::exp2: 2775 case Intrinsic::fabs: 2776 return true; 2777 2778 case Intrinsic::sqrt: 2779 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 2780 if (!SignBitOnly) 2781 return true; 2782 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 2783 CannotBeNegativeZero(CI->getOperand(0), TLI)); 2784 2785 case Intrinsic::powi: 2786 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 2787 // powi(x,n) is non-negative if n is even. 2788 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 2789 return true; 2790 } 2791 // TODO: This is not correct. Given that exp is an integer, here are the 2792 // ways that pow can return a negative value: 2793 // 2794 // pow(x, exp) --> negative if exp is odd and x is negative. 2795 // pow(-0, exp) --> -inf if exp is negative odd. 2796 // pow(-0, exp) --> -0 if exp is positive odd. 2797 // pow(-inf, exp) --> -0 if exp is negative odd. 2798 // pow(-inf, exp) --> -inf if exp is positive odd. 2799 // 2800 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 2801 // but we must return false if x == -0. Unfortunately we do not currently 2802 // have a way of expressing this constraint. See details in 2803 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2804 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2805 Depth + 1); 2806 2807 case Intrinsic::fma: 2808 case Intrinsic::fmuladd: 2809 // x*x+y is non-negative if y is non-negative. 2810 return I->getOperand(0) == I->getOperand(1) && 2811 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && 2812 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2813 Depth + 1); 2814 } 2815 break; 2816 } 2817 return false; 2818 } 2819 2820 bool llvm::CannotBeOrderedLessThanZero(const Value *V, 2821 const TargetLibraryInfo *TLI) { 2822 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); 2823 } 2824 2825 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { 2826 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); 2827 } 2828 2829 bool llvm::isKnownNeverNaN(const Value *V) { 2830 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type"); 2831 2832 // If we're told that NaNs won't happen, assume they won't. 2833 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) 2834 if (FPMathOp->hasNoNaNs()) 2835 return true; 2836 2837 // TODO: Handle instructions and potentially recurse like other 'isKnown' 2838 // functions. For example, the result of sitofp is never NaN. 2839 2840 // Handle scalar constants. 2841 if (auto *CFP = dyn_cast<ConstantFP>(V)) 2842 return !CFP->isNaN(); 2843 2844 // Bail out for constant expressions, but try to handle vector constants. 
2845 if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2846 return false;
2847
2848 // For vectors, verify that each element is not NaN.
2849 unsigned NumElts = V->getType()->getVectorNumElements();
2850 for (unsigned i = 0; i != NumElts; ++i) {
2851 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2852 if (!Elt)
2853 return false;
2854 if (isa<UndefValue>(Elt))
2855 continue;
2856 auto *CElt = dyn_cast<ConstantFP>(Elt);
2857 if (!CElt || CElt->isNaN())
2858 return false;
2859 }
2860 // All elements were confirmed not-NaN or undefined.
2861 return true;
2862 }
2863
2864 /// If the specified value can be set by repeating the same byte in memory,
2865 /// return the i8 value that it is represented with. This is
2866 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2867 /// i16 0xF0F0, double 0.0, etc. If the value can't be handled with a repeated
2868 /// byte store (e.g. i16 0x1234), return null.
2869 Value *llvm::isBytewiseValue(Value *V) {
2870 // All byte-wide stores are splatable, even of arbitrary variables.
2871 if (V->getType()->isIntegerTy(8)) return V;
2872
2873 // Handle 'null' ConstantAggregateZero etc.
2874 if (Constant *C = dyn_cast<Constant>(V))
2875 if (C->isNullValue())
2876 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2877
2878 // Constant float and double values can be handled as integer values if the
2879 // corresponding integer value is "byteable". An important case is 0.0.
2880 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2881 if (CFP->getType()->isFloatTy())
2882 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2883 if (CFP->getType()->isDoubleTy())
2884 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2885 // Don't handle long double formats, which have strange constraints.
2886 }
2887
2888 // We can handle constant integers that are a multiple of 8 bits wide.
2889 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2890 if (CI->getBitWidth() % 8 == 0) {
2891 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2892
2893 if (!CI->getValue().isSplat(8))
2894 return nullptr;
2895 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2896 }
2897 }
2898
2899 // A ConstantDataArray/Vector is splatable if all its members are equal and
2900 // also splatable.
2901 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2902 Value *Elt = CA->getElementAsConstant(0);
2903 Value *Val = isBytewiseValue(Elt);
2904 if (!Val)
2905 return nullptr;
2906
2907 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2908 if (CA->getElementAsConstant(I) != Elt)
2909 return nullptr;
2910
2911 return Val;
2912 }
2913
2914 // Conceptually, we could handle things like:
2915 // %a = zext i8 %X to i16
2916 // %b = shl i16 %a, 8
2917 // %c = or i16 %a, %b
2918 // but until there is an example that actually needs this, it doesn't seem
2919 // worth worrying about.
2920 return nullptr;
2921 }
2922
2923 // This is the recursive version of BuildSubAggregate. It takes a few different
2924 // arguments. Idxs is the index within the nested struct From that we are
2925 // looking at now (which is of type IndexedType). IdxSkip is the number of
2926 // indices from Idxs that should be left out when inserting into the resulting
2927 // struct. To is the result struct built so far; new insertvalue instructions
2928 // build on that.
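// For example, if From has type { i32, { i32, i32 } }, Idxs is { 1, 0 } and
// IdxSkip is 1, then the value found at From[1][0] is inserted into To at
// index { 0 }.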
2929 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2930 SmallVectorImpl<unsigned> &Idxs,
2931 unsigned IdxSkip,
2932 Instruction *InsertBefore) {
2933 StructType *STy = dyn_cast<StructType>(IndexedType);
2934 if (STy) {
2935 // Save the original To argument so we can modify it.
2936 Value *OrigTo = To;
2937 // General case: the type indexed by Idxs is a struct.
2938 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2939 // Process each struct element recursively.
2940 Idxs.push_back(i);
2941 Value *PrevTo = To;
2942 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2943 InsertBefore);
2944 Idxs.pop_back();
2945 if (!To) {
2946 // Couldn't find any inserted value for this index? Clean up.
2947 while (PrevTo != OrigTo) {
2948 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2949 PrevTo = Del->getAggregateOperand();
2950 Del->eraseFromParent();
2951 }
2952 // Stop processing elements.
2953 break;
2954 }
2955 }
2956 // If we successfully found a value for each of our subaggregates, return it.
2957 if (To)
2958 return To;
2959 }
2960 // Base case: the type indexed by Idxs is not a struct, or not all of
2961 // the struct's elements had a value that was inserted directly. In the latter
2962 // case, perhaps we can't determine each of the subelements individually, but
2963 // we might be able to find the complete struct somewhere.
2964
2965 // Find the value that is at that particular spot.
2966 Value *V = FindInsertedValue(From, Idxs);
2967
2968 if (!V)
2969 return nullptr;
2970
2971 // Insert the value in the new (sub) aggregate.
2972 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2973 "tmp", InsertBefore);
2974 }
2975
2976 // This helper takes a nested struct and extracts a part of it (which is again a
2977 // struct) into a new value. For example, given the struct:
2978 // { a, { b, { c, d }, e } }
2979 // and the indices "1, 1" this returns
2980 // { c, d }.
2981 //
2982 // It does this by inserting an insertvalue for each element in the resulting
2983 // struct, as opposed to just inserting a single struct. This will only work if
2984 // each of the elements of the substruct is known (i.e., inserted into From by
2985 // an insertvalue instruction somewhere).
2986 //
2987 // All inserted insertvalue instructions are inserted before InsertBefore.
2988 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2989 Instruction *InsertBefore) {
2990 assert(InsertBefore && "Must have someplace to insert!");
2991 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2992 idx_range);
2993 Value *To = UndefValue::get(IndexedType);
2994 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2995 unsigned IdxSkip = Idxs.size();
2996
2997 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2998 }
2999
3000 /// Given an aggregate and a sequence of indices, see if
3001 /// the scalar value indexed is already around as a register, for example if it
3002 /// were inserted directly into the aggregate.
3003 ///
3004 /// If InsertBefore is not null, this function will duplicate (modified)
3005 /// insertvalues when a part of a nested struct is extracted.
3006 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3007 Instruction *InsertBefore) {
3008 // Nothing to index? Just return V then (this is useful at the end of our
3009 // recursion).
3010 if (idx_range.empty())
3011 return V;
3012 // We have indices, so V should have an indexable type.
3013 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3014 "Not looking at a struct or array?");
3015 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3016 "Invalid indices for type?");
3017
3018 if (Constant *C = dyn_cast<Constant>(V)) {
3019 C = C->getAggregateElement(idx_range[0]);
3020 if (!C) return nullptr;
3021 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3022 }
3023
3024 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3025 // Loop over the indices of the insertvalue instruction in parallel with
3026 // the requested indices.
3027 const unsigned *req_idx = idx_range.begin();
3028 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3029 i != e; ++i, ++req_idx) {
3030 if (req_idx == idx_range.end()) {
3031 // We can't handle this without inserting insertvalues.
3032 if (!InsertBefore)
3033 return nullptr;
3034
3035 // The requested index identifies a part of a nested aggregate. Handle
3036 // this specially. For example,
3037 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3038 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3039 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3040 // This can be changed into
3041 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3042 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3043 // which allows the unused 0,0 element from the nested struct to be
3044 // removed.
3045 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3046 InsertBefore);
3047 }
3048
3049 // This insertvalue inserts something other than what we are looking for.
3050 // See if the (aggregate) value inserted into has the value we are
3051 // looking for, then.
3052 if (*req_idx != *i)
3053 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3054 InsertBefore);
3055 }
3056 // If we end up here, the indices of the insertvalue match those requested
3057 // (though possibly only partially). Now we recursively look at the
3058 // inserted value, passing any remaining indices.
3059 return FindInsertedValue(I->getInsertedValueOperand(),
3060 makeArrayRef(req_idx, idx_range.end()),
3061 InsertBefore);
3062 }
3063
3064 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3065 // If we're extracting a value from an aggregate that was extracted from
3066 // something else, we can extract from that something else directly instead.
3067 // However, we will need to chain I's indices with the requested indices.
3068
3069 // Calculate the number of indices required.
3070 unsigned size = I->getNumIndices() + idx_range.size();
3071 // Allocate some space to put the new indices in.
3072 SmallVector<unsigned, 5> Idxs;
3073 Idxs.reserve(size);
3074 // Add indices from the extractvalue instruction.
3075 Idxs.append(I->idx_begin(), I->idx_end());
3076
3077 // Add requested indices.
3078 Idxs.append(idx_range.begin(), idx_range.end());
3079
3080 assert(Idxs.size() == size
3081 && "Number of indices added not correct?");
3082
3083 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3084 }
3085 // Otherwise, we don't know (e.g. when extracting from a function return
3086 // value or a load instruction).
3087 return nullptr;
3088 }
3089
3090 /// Analyze the specified pointer to see if it can be expressed as a base
3091 /// pointer plus a constant offset. Return the base and offset to the caller.
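/// For example (illustrative IR):
///   %p = getelementptr inbounds i8, i8* %base, i64 4
///   %q = getelementptr inbounds i8, i8* %p, i64 8
/// Analyzing %q sets Offset to 12 and returns %base.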
3092 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3093 const DataLayout &DL) {
3094 unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
3095 APInt ByteOffset(BitWidth, 0);
3096
3097 // We walk up the defs but use a visited set to handle unreachable code. In
3098 // that case, we stop after accumulating the cycle once (not that it
3099 // matters).
3100 SmallPtrSet<Value *, 16> Visited;
3101 while (Visited.insert(Ptr).second) {
3102 if (Ptr->getType()->isVectorTy())
3103 break;
3104
3105 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3106 // If one of the values we have visited is an addrspacecast, then
3107 // the pointer type of this GEP may be different from the type
3108 // of the Ptr parameter which was passed to this function. This
3109 // means when we construct GEPOffset, we need to use the size
3110 // of GEP's pointer type rather than the size of the original
3111 // pointer type.
3112 APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
3113 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3114 break;
3115
3116 ByteOffset += GEPOffset.getSExtValue();
3117
3118 Ptr = GEP->getPointerOperand();
3119 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3120 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3121 Ptr = cast<Operator>(Ptr)->getOperand(0);
3122 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3123 if (GA->isInterposable())
3124 break;
3125 Ptr = GA->getAliasee();
3126 } else {
3127 break;
3128 }
3129 }
3130 Offset = ByteOffset.getSExtValue();
3131 return Ptr;
3132 }
3133
3134 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3135 unsigned CharSize) {
3136 // Make sure the GEP has exactly three arguments.
3137 if (GEP->getNumOperands() != 3)
3138 return false;
3139
3140 // Make sure the index-ee is a pointer to an array of integers that are
3141 // each \p CharSize bits wide.
3142 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3143 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3144 return false;
3145
3146 // Check to make sure that the first index operand of the GEP is an integer
3147 // with value 0 so that we are sure we're indexing into the initializer.
3148 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3149 if (!FirstIdx || !FirstIdx->isZero())
3150 return false;
3151
3152 return true;
3153 }
3154
3155 bool llvm::getConstantDataArrayInfo(const Value *V,
3156 ConstantDataArraySlice &Slice,
3157 unsigned ElementSize, uint64_t Offset) {
3158 assert(V);
3159
3160 // Look through bitcast instructions and geps.
3161 V = V->stripPointerCasts();
3162
3163 // If the value is a GEP instruction or constant expression, treat it as an
3164 // offset.
3165 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3166 // The GEP operator should be based on a pointer to a string constant, and
3167 // should be indexing into that string constant.
3168 if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3169 return false;
3170
3171 // If the second index isn't a ConstantInt, then this is a variable index
3172 // into the array. If this occurs, we can't say anything meaningful about
3173 // the string.
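// (For instance, a hypothetical `getelementptr [8 x i8], [8 x i8]* @s,
// i64 0, i64 %i` has a non-constant second index, so we give up here.)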
3174 uint64_t StartIdx = 0;
3175 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3176 StartIdx = CI->getZExtValue();
3177 else
3178 return false;
3179 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3180 StartIdx + Offset);
3181 }
3182
3183 // The GEP operator, whether a constant expression or an instruction, must
3184 // reference a global variable that is a constant and is initialized. The
3185 // referenced constant initializer is the array that we'll use for optimization.
3186 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3187 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3188 return false;
3189
3190 const ConstantDataArray *Array;
3191 ArrayType *ArrayTy;
3192 if (GV->getInitializer()->isNullValue()) {
3193 Type *GVTy = GV->getValueType();
3194 if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
3195 // A zeroinitializer for the array; there is no ConstantDataArray.
3196 Array = nullptr;
3197 } else {
3198 const DataLayout &DL = GV->getParent()->getDataLayout();
3199 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3200 uint64_t Length = SizeInBytes / (ElementSize / 8);
3201 if (Length <= Offset)
3202 return false;
3203
3204 Slice.Array = nullptr;
3205 Slice.Offset = 0;
3206 Slice.Length = Length - Offset;
3207 return true;
3208 }
3209 } else {
3210 // This must be a ConstantDataArray.
3211 Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3212 if (!Array)
3213 return false;
3214 ArrayTy = Array->getType();
3215 }
3216 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3217 return false;
3218
3219 uint64_t NumElts = ArrayTy->getArrayNumElements();
3220 if (Offset > NumElts)
3221 return false;
3222
3223 Slice.Array = Array;
3224 Slice.Offset = Offset;
3225 Slice.Length = NumElts - Offset;
3226 return true;
3227 }
3228
3229 /// This function looks at the null-terminated C string pointed to by V, and
3230 /// extracts its contents into Str. If successful, it returns true; if
3231 /// unsuccessful, it returns false.
3232 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3233 uint64_t Offset, bool TrimAtNul) {
3234 ConstantDataArraySlice Slice;
3235 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3236 return false;
3237
3238 if (Slice.Array == nullptr) {
3239 if (TrimAtNul) {
3240 Str = StringRef();
3241 return true;
3242 }
3243 if (Slice.Length == 1) {
3244 Str = StringRef("", 1);
3245 return true;
3246 }
3247 // We cannot instantiate a StringRef as we do not have an appropriate string
3248 // of 0s at hand.
3249 return false;
3250 }
3251
3252 // Start out with the entire array in the StringRef.
3253 Str = Slice.Array->getAsString();
3254 // Skip over 'offset' bytes.
3255 Str = Str.substr(Slice.Offset);
3256
3257 if (TrimAtNul) {
3258 // Trim off the \0 and anything after it. If the array is not nul
3259 // terminated, we just return the whole end of string. The client may know
3260 // some other way that the string is length-bound.
3261 Str = Str.substr(0, Str.find('\0'));
3262 }
3263 return true;
3264 }
3265
3266 // These next two are very similar to the above, but also look through PHI
3267 // nodes.
3268 // TODO: See if we can integrate these two together.
3269
3270 /// If we can compute the length of the string pointed to by
3271 /// the specified pointer, return 'len+1'. If we can't, return 0.
3272 static uint64_t GetStringLengthH(const Value *V,
3273 SmallPtrSetImpl<const PHINode*> &PHIs,
3274 unsigned CharSize) {
3275 // Look through noop bitcast instructions.
3276 V = V->stripPointerCasts();
3277
3278 // If this is a PHI node, there are two cases: either we have already seen it
3279 // or we haven't.
3280 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3281 if (!PHIs.insert(PN).second)
3282 return ~0ULL; // already in the set.
3283
3284 // If it was new, see if all the input strings are the same length.
3285 uint64_t LenSoFar = ~0ULL;
3286 for (Value *IncValue : PN->incoming_values()) {
3287 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3288 if (Len == 0) return 0; // Unknown length -> unknown.
3289
3290 if (Len == ~0ULL) continue;
3291
3292 if (Len != LenSoFar && LenSoFar != ~0ULL)
3293 return 0; // Disagree -> unknown.
3294 LenSoFar = Len;
3295 }
3296
3297 // Success, all agree.
3298 return LenSoFar;
3299 }
3300
3301 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3302 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3303 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3304 if (Len1 == 0) return 0;
3305 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3306 if (Len2 == 0) return 0;
3307 if (Len1 == ~0ULL) return Len2;
3308 if (Len2 == ~0ULL) return Len1;
3309 if (Len1 != Len2) return 0;
3310 return Len1;
3311 }
3312
3313 // Otherwise, see if we can read the string.
3314 ConstantDataArraySlice Slice;
3315 if (!getConstantDataArrayInfo(V, Slice, CharSize))
3316 return 0;
3317
3318 if (Slice.Array == nullptr)
3319 return 1;
3320
3321 // Search for nul characters.
3322 unsigned NullIndex = 0;
3323 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3324 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3325 break;
3326 }
3327
3328 return NullIndex + 1;
3329 }
3330
3331 /// If we can compute the length of the string pointed to by
3332 /// the specified pointer, return 'len+1'. If we can't, return 0.
3333 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3334 if (!V->getType()->isPointerTy()) return 0;
3335
3336 SmallPtrSet<const PHINode*, 32> PHIs;
3337 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3338 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3339 // an empty string as a length.
3340 return Len == ~0ULL ? 1 : Len;
3341 }
3342
3343 /// \brief \p PN defines a loop-variant pointer to an object. Check if the
3344 /// previous iteration of the loop was referring to the same object as \p PN.
3345 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3346 const LoopInfo *LI) {
3347 // Find the loop-defined value.
3348 Loop *L = LI->getLoopFor(PN->getParent());
3349 if (PN->getNumIncomingValues() != 2)
3350 return true;
3351
3352 // Find the value from the previous iteration.
3353 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3354 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3355 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3356 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3357 return true;
3358
3359 // If a new pointer is loaded in the loop, the pointer references a different
3360 // object in every iteration. E.g.:
3361 // for (i)
3362 // int *p = a[i];
3363 // ...
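// By contrast, a pointer loaded from a loop-invariant location may refer to
// the same object on every iteration, which is why the check below only
// returns false when the load's pointer operand varies within the loop.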
3364 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3365 if (!L->isLoopInvariant(Load->getPointerOperand()))
3366 return false;
3367 return true;
3368 }
3369
3370 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3371 unsigned MaxLookup) {
3372 if (!V->getType()->isPointerTy())
3373 return V;
3374 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3375 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3376 V = GEP->getPointerOperand();
3377 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3378 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3379 V = cast<Operator>(V)->getOperand(0);
3380 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3381 if (GA->isInterposable())
3382 return V;
3383 V = GA->getAliasee();
3384 } else if (isa<AllocaInst>(V)) {
3385 // An alloca can't be further simplified.
3386 return V;
3387 } else {
3388 if (auto CS = CallSite(V))
3389 if (Value *RV = CS.getReturnedArgOperand()) {
3390 V = RV;
3391 continue;
3392 }
3393
3394 // See if InstructionSimplify knows any relevant tricks.
3395 if (Instruction *I = dyn_cast<Instruction>(V))
3396 // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3397 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3398 V = Simplified;
3399 continue;
3400 }
3401
3402 return V;
3403 }
3404 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3405 }
3406 return V;
3407 }
3408
3409 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3410 const DataLayout &DL, LoopInfo *LI,
3411 unsigned MaxLookup) {
3412 SmallPtrSet<Value *, 4> Visited;
3413 SmallVector<Value *, 4> Worklist;
3414 Worklist.push_back(V);
3415 do {
3416 Value *P = Worklist.pop_back_val();
3417 P = GetUnderlyingObject(P, DL, MaxLookup);
3418
3419 if (!Visited.insert(P).second)
3420 continue;
3421
3422 if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3423 Worklist.push_back(SI->getTrueValue());
3424 Worklist.push_back(SI->getFalseValue());
3425 continue;
3426 }
3427
3428 if (PHINode *PN = dyn_cast<PHINode>(P)) {
3429 // If this PHI changes the underlying object in every iteration of the
3430 // loop, don't look through it. Consider:
3431 // int **A;
3432 // for (i) {
3433 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3434 // Curr = A[i];
3435 // *Prev, *Curr;
3436 // }
3437 // Prev is tracking Curr one iteration behind so they refer to different
3438 // underlying objects.
3439 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3440 isSameUnderlyingObjectInLoop(PN, LI))
3441 for (Value *IncValue : PN->incoming_values())
3442 Worklist.push_back(IncValue);
3443 continue;
3444 }
3445
3446 Objects.push_back(P);
3447 } while (!Worklist.empty());
3448 }
3449
3450 /// This is the function that does the work of looking through basic
3451 /// ptrtoint+arithmetic+inttoptr sequences.
3452 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3453 do {
3454 if (const Operator *U = dyn_cast<Operator>(V)) {
3455 // If we find a ptrtoint, we can transfer control back to the
3456 // regular pointer walk in GetUnderlyingObjects.
3457 if (U->getOpcode() == Instruction::PtrToInt)
3458 return U->getOperand(0);
3459 // If we find an add of a constant, a multiplied value, or a phi, it's
3460 // likely that the other operand will lead us to the base
3461 // object. We don't have to worry about the case where the
3462 // object address is somehow being computed by the multiply,
3463 // because our callers only care when the result is an
3464 // identifiable object.
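// For example (illustrative IR):
//   %i = ptrtoint i8* %obj to i64
//   %j = add i64 %i, 16
//   %p = inttoptr i64 %j to i8*
// Starting from %j, the walk skips the constant 16, reaches the ptrtoint,
// and returns its pointer operand %obj.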
3465 if (U->getOpcode() != Instruction::Add ||
3466 (!isa<ConstantInt>(U->getOperand(1)) &&
3467 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3468 !isa<PHINode>(U->getOperand(1))))
3469 return V;
3470 V = U->getOperand(0);
3471 } else {
3472 return V;
3473 }
3474 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3475 } while (true);
3476 }
3477
3478 /// This is a wrapper around GetUnderlyingObjects that adds support for basic
3479 /// ptrtoint+arithmetic+inttoptr sequences.
3480 /// It returns false if an unidentified object is found by GetUnderlyingObjects.
3481 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3482 SmallVectorImpl<Value *> &Objects,
3483 const DataLayout &DL) {
3484 SmallPtrSet<const Value *, 16> Visited;
3485 SmallVector<const Value *, 4> Working(1, V);
3486 do {
3487 V = Working.pop_back_val();
3488
3489 SmallVector<Value *, 4> Objs;
3490 GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3491
3492 for (Value *V : Objs) {
3493 if (!Visited.insert(V).second)
3494 continue;
3495 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3496 const Value *O =
3497 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3498 if (O->getType()->isPointerTy()) {
3499 Working.push_back(O);
3500 continue;
3501 }
3502 }
3503 // If GetUnderlyingObjects fails to find an identifiable object,
3504 // getUnderlyingObjectsForCodeGen also fails for safety.
3505 if (!isIdentifiedObject(V)) {
3506 Objects.clear();
3507 return false;
3508 }
3509 Objects.push_back(const_cast<Value *>(V));
3510 }
3511 } while (!Working.empty());
3512 return true;
3513 }
3514
3515 /// Return true if the only users of this pointer are lifetime markers.
3516 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3517 for (const User *U : V->users()) {
3518 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3519 if (!II) return false;
3520
3521 if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3522 II->getIntrinsicID() != Intrinsic::lifetime_end)
3523 return false;
3524 }
3525 return true;
3526 }
3527
3528 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3529 const Instruction *CtxI,
3530 const DominatorTree *DT) {
3531 const Operator *Inst = dyn_cast<Operator>(V);
3532 if (!Inst)
3533 return false;
3534
3535 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3536 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3537 if (C->canTrap())
3538 return false;
3539
3540 switch (Inst->getOpcode()) {
3541 default:
3542 return true;
3543 case Instruction::UDiv:
3544 case Instruction::URem: {
3545 // x / y is undefined if y == 0.
3546 const APInt *V;
3547 if (match(Inst->getOperand(1), m_APInt(V)))
3548 return *V != 0;
3549 return false;
3550 }
3551 case Instruction::SDiv:
3552 case Instruction::SRem: {
3553 // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
3554 const APInt *Numerator, *Denominator;
3555 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3556 return false;
3557 // We cannot hoist this division if the denominator is 0.
3558 if (*Denominator == 0)
3559 return false;
3560 // It's safe to hoist if the denominator is neither 0 nor -1.
3561 if (*Denominator != -1)
3562 return true;
3563 // At this point we know that the denominator is -1. It is safe to hoist as
3564 // long as we know that the numerator is not INT_MIN.
3565 if (match(Inst->getOperand(0), m_APInt(Numerator)))
3566 return !Numerator->isMinSignedValue();
3567 // The numerator *might* be MinSignedValue.
3568 return false;
3569 }
3570 case Instruction::Load: {
3571 const LoadInst *LI = cast<LoadInst>(Inst);
3572 if (!LI->isUnordered() ||
3573 // Speculative load may create a race that did not exist in the source.
3574 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3575 // Speculative load may load data from dirty regions.
3576 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3577 LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3578 return false;
3579 const DataLayout &DL = LI->getModule()->getDataLayout();
3580 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3581 LI->getAlignment(), DL, CtxI, DT);
3582 }
3583 case Instruction::Call: {
3584 auto *CI = cast<const CallInst>(Inst);
3585 const Function *Callee = CI->getCalledFunction();
3586
3587 // The called function could have undefined behavior or side-effects, even
3588 // if marked readnone nounwind.
3589 return Callee && Callee->isSpeculatable();
3590 }
3591 case Instruction::VAArg:
3592 case Instruction::Alloca:
3593 case Instruction::Invoke:
3594 case Instruction::PHI:
3595 case Instruction::Store:
3596 case Instruction::Ret:
3597 case Instruction::Br:
3598 case Instruction::IndirectBr:
3599 case Instruction::Switch:
3600 case Instruction::Unreachable:
3601 case Instruction::Fence:
3602 case Instruction::AtomicRMW:
3603 case Instruction::AtomicCmpXchg:
3604 case Instruction::LandingPad:
3605 case Instruction::Resume:
3606 case Instruction::CatchSwitch:
3607 case Instruction::CatchPad:
3608 case Instruction::CatchRet:
3609 case Instruction::CleanupPad:
3610 case Instruction::CleanupRet:
3611 return false; // Misc instructions which have effects.
3612 }
3613 }
3614
3615 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3616 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3617 }
3618
3619 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3620 const Value *RHS,
3621 const DataLayout &DL,
3622 AssumptionCache *AC,
3623 const Instruction *CxtI,
3624 const DominatorTree *DT) {
3625 // Multiplying an n-bit value by an m-bit value yields a result of at most
3626 // n + m significant bits. If the total number of significant bits does not
3627 // exceed the result bit width, there is no overflow.
3628 // This means if we have enough leading zero bits in the operands
3629 // we can guarantee that the result does not overflow.
3630 // Ref: "Hacker's Delight" by Henry Warren
3631 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3632 KnownBits LHSKnown(BitWidth);
3633 KnownBits RHSKnown(BitWidth);
3634 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3635 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3636 // Note that underestimating the number of zero bits gives a more
3637 // conservative answer.
3638 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3639 RHSKnown.countMinLeadingZeros();
3640 // First handle the easy case: if we have enough zero bits there's
3641 // definitely no overflow.
3642 if (ZeroBits >= BitWidth)
3643 return OverflowResult::NeverOverflows;
3644
3645 // Get the largest possible values for each operand.
3646 APInt LHSMax = ~LHSKnown.Zero;
3647 APInt RHSMax = ~RHSKnown.Zero;
3648
3649 // We know the multiply operation doesn't overflow if the maximum values for
3650 // each operand will not overflow after we multiply them together.
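// For example (hypothetical i8 known bits): if the zero bits bound the
// operands to at most 27 and 9, their leading-zero counts sum to only 7,
// so the check above fails, yet 27 * 9 = 243 still fits in 8 bits, so the
// check below can still prove NeverOverflows.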
3651 bool MaxOverflow;
3652 (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3653 if (!MaxOverflow)
3654 return OverflowResult::NeverOverflows;
3655
3656 // We know it always overflows if multiplying the smallest possible values
3657 // for the operands also results in overflow.
3658 bool MinOverflow;
3659 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3660 if (MinOverflow)
3661 return OverflowResult::AlwaysOverflows;
3662
3663 return OverflowResult::MayOverflow;
3664 }
3665
3666 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3667 const Value *RHS,
3668 const DataLayout &DL,
3669 AssumptionCache *AC,
3670 const Instruction *CxtI,
3671 const DominatorTree *DT) {
3672 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3673 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3674 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3675
3676 if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3677 // The sign bit is set in both cases: this MUST overflow.
3678 // Both operands are at least 2^(BitWidth - 1), so the sum wraps.
3679 return OverflowResult::AlwaysOverflows;
3680 }
3681
3682 if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3683 // The sign bit is clear in both cases: this CANNOT overflow.
3684 // Both operands are below 2^(BitWidth - 1), so the sum fits.
3685 return OverflowResult::NeverOverflows;
3686 }
3687 }
3688
3689 return OverflowResult::MayOverflow;
3690 }
3691
3692 /// \brief Return true if we can prove that adding the two values described by
3693 /// the known bits will not overflow.
3694 /// Otherwise return false.
3695 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3696 const KnownBits &RHSKnown) {
3697 // Addition of two 2's complement numbers having opposite signs will never
3698 // overflow.
3699 if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3700 (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3701 return true;
3702
3703 // If either of the values is known to be non-negative, adding them can only
3704 // overflow if the other is also non-negative, so we may assume that it is.
3705 // Two non-negative numbers will only overflow if there is a carry to the
3706 // sign bit, so we can check if even when the values are as big as possible
3707 // there is no overflow to the sign bit.
3708 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3709 APInt MaxLHS = ~LHSKnown.Zero;
3710 MaxLHS.clearSignBit();
3711 APInt MaxRHS = ~RHSKnown.Zero;
3712 MaxRHS.clearSignBit();
3713 APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3714 return Result.isSignBitClear();
3715 }
3716
3717 // If either of the values is known to be negative, adding them can only
3718 // overflow if the other is also negative, so we may assume that it is.
3719 // Two negative numbers will only overflow if there is no carry to the sign
3720 // bit, so we can check if even when the values are as small as possible
3721 // there is overflow to the sign bit.
3722 if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3723 APInt MinLHS = LHSKnown.One;
3724 MinLHS.clearSignBit();
3725 APInt MinRHS = RHSKnown.One;
3726 MinRHS.clearSignBit();
3727 APInt Result = std::move(MinLHS) + std::move(MinRHS);
3728 return Result.isSignBitSet();
3729 }
3730
3731 // If we reached here it means that we know nothing about the sign bits.
3732 // In this case we can't know if there will be an overflow, since by
3733 // changing the sign bits any two values can be made to overflow.
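// (For example, with i4 operands about which nothing is known, 0 + 0 does
// not overflow while -8 + -8 does, so neither outcome can be ruled out.)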
3734 return false;
3735 }
3736
3737 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3738 const Value *RHS,
3739 const AddOperator *Add,
3740 const DataLayout &DL,
3741 AssumptionCache *AC,
3742 const Instruction *CxtI,
3743 const DominatorTree *DT) {
3744 if (Add && Add->hasNoSignedWrap()) {
3745 return OverflowResult::NeverOverflows;
3746 }
3747
3748 // If LHS and RHS each have at least two sign bits, the addition will look
3749 // like
3750 //
3751 // XX..... +
3752 // YY.....
3753 //
3754 // If the carry into the most significant position is 0, X and Y can't both
3755 // be 1 and therefore the carry out of the addition is also 0.
3756 //
3757 // If the carry into the most significant position is 1, X and Y can't both
3758 // be 0 and therefore the carry out of the addition is also 1.
3759 //
3760 // Since the carry into the most significant position is always equal to
3761 // the carry out of the addition, there is no signed overflow.
3762 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3763 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3764 return OverflowResult::NeverOverflows;
3765
3766 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3767 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3768
3769 if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3770 return OverflowResult::NeverOverflows;
3771
3772 // The remaining code needs Add to be available. Return early if it is not.
3773 if (!Add)
3774 return OverflowResult::MayOverflow;
3775
3776 // If the sign of Add is the same as at least one of the operands, this add
3777 // CANNOT overflow. This is particularly useful when the sum is
3778 // @llvm.assume'ed non-negative rather than proved so from analyzing its
3779 // operands.
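// For example (illustrative IR, with %a known non-negative):
//   %s = add i32 %a, %b
//   %c = icmp sge i32 %s, 0
//   call void @llvm.assume(i1 %c)
// The assume lets computeKnownBits prove %s non-negative; combined with %a
// being non-negative, a wrapping sum (which would be negative) is ruled out.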
3780 bool LHSOrRHSKnownNonNegative =
3781 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3782 bool LHSOrRHSKnownNegative =
3783 (LHSKnown.isNegative() || RHSKnown.isNegative());
3784 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3785 KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3786 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3787 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3788 return OverflowResult::NeverOverflows;
3789 }
3790 }
3791
3792 return OverflowResult::MayOverflow;
3793 }
3794
3795 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3796 const DominatorTree &DT) {
3797 #ifndef NDEBUG
3798 auto IID = II->getIntrinsicID();
3799 assert((IID == Intrinsic::sadd_with_overflow ||
3800 IID == Intrinsic::uadd_with_overflow ||
3801 IID == Intrinsic::ssub_with_overflow ||
3802 IID == Intrinsic::usub_with_overflow ||
3803 IID == Intrinsic::smul_with_overflow ||
3804 IID == Intrinsic::umul_with_overflow) &&
3805 "Not an overflow intrinsic!");
3806 #endif
3807
3808 SmallVector<const BranchInst *, 2> GuardingBranches;
3809 SmallVector<const ExtractValueInst *, 2> Results;
3810
3811 for (const User *U : II->users()) {
3812 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3813 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3814
3815 if (EVI->getIndices()[0] == 0)
3816 Results.push_back(EVI);
3817 else {
3818 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3819
3820 for (const auto *U : EVI->users())
3821 if (const auto *B = dyn_cast<BranchInst>(U)) {
3822 assert(B->isConditional() && "How else is it using an i1?");
3823 GuardingBranches.push_back(B);
3824 }
3825 }
3826 } else {
3827 // We are using the aggregate directly in a way we don't want to analyze
3828 // here (storing it to a global, say).
3829 return false;
3830 }
3831 }
3832
3833 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3834 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3835 if (!NoWrapEdge.isSingleEdge())
3836 return false;
3837
3838 // Check if all users of the add are provably no-wrap.
3839 for (const auto *Result : Results) {
3840 // If the extractvalue itself is not executed on overflow, then we don't
3841 // need to check each use separately, since domination is transitive.
3842 if (DT.dominates(NoWrapEdge, Result->getParent()))
3843 continue;
3844
3845 for (auto &RU : Result->uses())
3846 if (!DT.dominates(NoWrapEdge, RU))
3847 return false;
3848 }
3849
3850 return true;
3851 };
3852
3853 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
3854 }
3855
3856
3857 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3858 const DataLayout &DL,
3859 AssumptionCache *AC,
3860 const Instruction *CxtI,
3861 const DominatorTree *DT) {
3862 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3863 Add, DL, AC, CxtI, DT);
3864 }
3865
3866 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3867 const Value *RHS,
3868 const DataLayout &DL,
3869 AssumptionCache *AC,
3870 const Instruction *CxtI,
3871 const DominatorTree *DT) {
3872 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3873 }
3874
3875 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3876 // A memory operation returns normally if it isn't volatile. A volatile
3877 // operation is allowed to trap.
3878 //
3879 // An atomic operation isn't guaranteed to return in a reasonable amount of
3880 // time because it's possible for another thread to interfere with it for an
3881 // arbitrary length of time, but programs aren't allowed to rely on that.
3882 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3883 return !LI->isVolatile();
3884 if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3885 return !SI->isVolatile();
3886 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3887 return !CXI->isVolatile();
3888 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3889 return !RMWI->isVolatile();
3890 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3891 return !MII->isVolatile();
3892
3893 // If there is no successor, then execution can't transfer to it.
3894 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3895 return !CRI->unwindsToCaller();
3896 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3897 return !CatchSwitch->unwindsToCaller();
3898 if (isa<ResumeInst>(I))
3899 return false;
3900 if (isa<ReturnInst>(I))
3901 return false;
3902 if (isa<UnreachableInst>(I))
3903 return false;
3904
3905 // Calls can throw, or contain an infinite loop, or kill the process.
3906 if (auto CS = ImmutableCallSite(I)) {
3907 // Call sites that throw have implicit non-local control flow.
3908 if (!CS.doesNotThrow())
3909 return false;
3910
3911 // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3912 // etc. and thus not return. However, LLVM already assumes that
3913 //
3914 // - Thread exiting actions are modeled as writes to memory invisible to
3915 // the program.
3916 //
3917 // - Loops that don't have side effects (side effects are volatile/atomic
3918 // stores and IO) always terminate (see http://llvm.org/PR965).
3919 // Furthermore IO itself is also modeled as writes to memory invisible to
3920 // the program.
3921 //
3922 // We rely on those assumptions here, and use the memory effects of the call
3923 // target as a proxy for checking that it always returns.
3924
3925 // FIXME: This isn't aggressive enough; a call which only writes to a global
3926 // is guaranteed to return.
3927 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3928 match(I, m_Intrinsic<Intrinsic::assume>()) ||
3929 match(I, m_Intrinsic<Intrinsic::sideeffect>());
3930 }
3931
3932 // Other instructions return normally.
3933 return true;
3934 }
3935
3936 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3937 const Loop *L) {
3938 // The loop header is guaranteed to be executed for every iteration.
3939 //
3940 // FIXME: Relax this constraint to cover all basic blocks that are
3941 // guaranteed to be executed at every iteration.
3942 if (I->getParent() != L->getHeader()) return false;
3943
3944 for (const Instruction &LI : *L->getHeader()) {
3945 if (&LI == I) return true;
3946 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3947 }
3948 llvm_unreachable("Instruction not contained in its own parent basic block.");
3949 }
3950
3951 bool llvm::propagatesFullPoison(const Instruction *I) {
3952 switch (I->getOpcode()) {
3953 case Instruction::Add:
3954 case Instruction::Sub:
3955 case Instruction::Xor:
3956 case Instruction::Trunc:
3957 case Instruction::BitCast:
3958 case Instruction::AddrSpaceCast:
3959 case Instruction::Mul:
3960 case Instruction::Shl:
3961 case Instruction::GetElementPtr:
3962 // These operations all propagate poison unconditionally. Note that poison
3963 // is not any particular value, so xor or subtraction of poison with
3964 // itself still yields poison, not zero.
3965 return true;
3966
3967 case Instruction::AShr:
3968 case Instruction::SExt:
3969 // For these operations, one bit of the input is replicated across
3970 // multiple output bits. A replicated poison bit is still poison.
3971 return true;
3972
3973 case Instruction::ICmp:
3974 // Comparing poison with any value yields poison. This is why, for
3975 // instance, x s< (x +nsw 1) can be folded to true.
3976 return true;
3977
3978 default:
3979 return false;
3980 }
3981 }
3982
3983 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3984 switch (I->getOpcode()) {
3985 case Instruction::Store:
3986 return cast<StoreInst>(I)->getPointerOperand();
3987
3988 case Instruction::Load:
3989 return cast<LoadInst>(I)->getPointerOperand();
3990
3991 case Instruction::AtomicCmpXchg:
3992 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3993
3994 case Instruction::AtomicRMW:
3995 return cast<AtomicRMWInst>(I)->getPointerOperand();
3996
3997 case Instruction::UDiv:
3998 case Instruction::SDiv:
3999 case Instruction::URem:
4000 case Instruction::SRem:
4001 return I->getOperand(1);
4002
4003 default:
4004 return nullptr;
4005 }
4006 }
4007
4008 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4009 // We currently only look for uses of poison values within the same basic
4010 // block, as that makes it easier to guarantee that the uses will be
4011 // executed given that PoisonI is executed.
4012 //
4013 // FIXME: Expand this to consider uses beyond the same basic block. To do
4014 // this, look out for the distinction between post-dominance and strong
4015 // post-dominance.
4016 const BasicBlock *BB = PoisonI->getParent();
4017
4018 // Set of instructions that we have proved will yield poison if PoisonI
4019 // does.
4020 SmallSet<const Value *, 16> YieldsPoison;
4021 SmallSet<const BasicBlock *, 4> Visited;
4022 YieldsPoison.insert(PoisonI);
4023 Visited.insert(PoisonI->getParent());
4024
4025 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4026
4027 unsigned Iter = 0;
4028 while (Iter++ < MaxDepth) {
4029 for (auto &I : make_range(Begin, End)) {
4030 if (&I != PoisonI) {
4031 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4032 if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4033 return true;
4034 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4035 return false;
4036 }
4037
4038 // Mark poison that propagates from I through uses of I.
4039 if (YieldsPoison.count(&I)) {
4040 for (const User *User : I.users()) {
4041 const Instruction *UserI = cast<Instruction>(User);
4042 if (propagatesFullPoison(UserI))
4043 YieldsPoison.insert(User);
4044 }
4045 }
4046 }
4047
4048 if (auto *NextBB = BB->getSingleSuccessor()) {
4049 if (Visited.insert(NextBB).second) {
4050 BB = NextBB;
4051 Begin = BB->getFirstNonPHI()->getIterator();
4052 End = BB->end();
4053 continue;
4054 }
4055 }
4056
4057 break;
4058 }
4059 return false;
4060 }
4061
4062 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4063 if (FMF.noNaNs())
4064 return true;
4065
4066 if (auto *C = dyn_cast<ConstantFP>(V))
4067 return !C->isNaN();
4068 return false;
4069 }
4070
4071 static bool isKnownNonZero(const Value *V) {
4072 if (auto *C = dyn_cast<ConstantFP>(V))
4073 return !C->isZero();
4074 return false;
4075 }
4076
4077 /// Match clamp pattern for float types without caring about NaNs or signed zeros.
4078 /// Given a non-min/max outer cmp/select from the clamp pattern, this
4079 /// function recognizes if it can be substituted by a "canonical" min/max
4080 /// pattern.
4081 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4082 Value *CmpLHS, Value *CmpRHS,
4083 Value *TrueVal, Value *FalseVal,
4084 Value *&LHS, Value *&RHS) {
4085 // Try to match
4086 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4087 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4088 // and return a description of the outer Max/Min.
4089
4090 // First, check if the select has inverted order:
4091 if (CmpRHS == FalseVal) {
4092 std::swap(TrueVal, FalseVal);
4093 Pred = CmpInst::getInversePredicate(Pred);
4094 }
4095
4096 // Assume success now. If there's no match, callers should not use these anyway.
4097 LHS = TrueVal;
4098 RHS = FalseVal;
4099
4100 const APFloat *FC1;
4101 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4102 return {SPF_UNKNOWN, SPNB_NA, false};
4103
4104 const APFloat *FC2;
4105 switch (Pred) {
4106 case CmpInst::FCMP_OLT:
4107 case CmpInst::FCMP_OLE:
4108 case CmpInst::FCMP_ULT:
4109 case CmpInst::FCMP_ULE:
4110 if (match(FalseVal,
4111 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4112 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4113 FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4114 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4115 break;
4116 case CmpInst::FCMP_OGT:
4117 case CmpInst::FCMP_OGE:
4118 case CmpInst::FCMP_UGT:
4119 case CmpInst::FCMP_UGE:
4120 if (match(FalseVal,
4121 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4122 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4123 FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4124 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4125 break;
4126 default:
4127 break;
4128 }
4129
4130 return {SPF_UNKNOWN, SPNB_NA, false};
4131 }
4132
4133 /// Recognize variations of:
4134 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4135 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4136 Value *CmpLHS, Value *CmpRHS,
4137 Value *TrueVal, Value *FalseVal) {
4138 // Swap the select operands and predicate to match the patterns below.
4139 if (CmpRHS != TrueVal) {
4140 Pred = ICmpInst::getSwappedPredicate(Pred);
4141 std::swap(TrueVal, FalseVal);
4142 }
4143 const APInt *C1;
4144 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4145 const APInt *C2;
4146 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4147 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4148 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4149 return {SPF_SMAX, SPNB_NA, false};
4150
4151 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4152 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4153 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4154 return {SPF_SMIN, SPNB_NA, false};
4155
4156 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4157 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4158 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4159 return {SPF_UMAX, SPNB_NA, false};
4160
4161 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4162 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4163 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4164 return {SPF_UMIN, SPNB_NA, false};
4165 }
4166 return {SPF_UNKNOWN, SPNB_NA, false};
4167 }
4168
4169 /// Recognize variations of:
4170 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4171 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4172 Value *CmpLHS, Value *CmpRHS,
4173 Value *TVal, Value *FVal,
4174 unsigned Depth) {
4175 // TODO: Allow FP min/max with nnan/nsz.
4176 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4177
4178 Value *A, *B;
4179 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4180 if (!SelectPatternResult::isMinOrMax(L.Flavor))
4181 return {SPF_UNKNOWN, SPNB_NA, false};
4182
4183 Value *C, *D;
4184 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4185 if (L.Flavor != R.Flavor)
4186 return {SPF_UNKNOWN, SPNB_NA, false};
4187
4188 // We have something like: x Pred y ? min(a, b) : min(c, d).
4189 // Try to match the compare to the min/max operations of the select operands.
4190 // First, make sure we have the right compare predicate.
4191 switch (L.Flavor) {
4192 case SPF_SMIN:
4193 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4194 Pred = ICmpInst::getSwappedPredicate(Pred);
4195 std::swap(CmpLHS, CmpRHS);
4196 }
4197 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4198 break;
4199 return {SPF_UNKNOWN, SPNB_NA, false};
4200 case SPF_SMAX:
4201 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4202 Pred = ICmpInst::getSwappedPredicate(Pred);
4203 std::swap(CmpLHS, CmpRHS);
4204 }
4205 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4206 break;
4207 return {SPF_UNKNOWN, SPNB_NA, false};
4208 case SPF_UMIN:
4209 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4210 Pred = ICmpInst::getSwappedPredicate(Pred);
4211 std::swap(CmpLHS, CmpRHS);
4212 }
4213 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4214 break;
4215 return {SPF_UNKNOWN, SPNB_NA, false};
4216 case SPF_UMAX:
4217 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4218 Pred = ICmpInst::getSwappedPredicate(Pred);
4219 std::swap(CmpLHS, CmpRHS);
4220 }
4221 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4222 break;
4223 return {SPF_UNKNOWN, SPNB_NA, false};
4224 default:
4225 return {SPF_UNKNOWN, SPNB_NA, false};
4226 }
4227
4228 // If there is a common operand in the already matched min/max and the other
4229 // min/max operands match the compare operands (either directly or inverted),
4230 // then this is min/max of the same flavor.
4231
4232 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4233 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4234 if (D == B) {
4235 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4236 match(A, m_Not(m_Specific(CmpRHS)))))
4237 return {L.Flavor, SPNB_NA, false};
4238 }
4239 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4240 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4241 if (C == B) {
4242 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4243 match(A, m_Not(m_Specific(CmpRHS)))))
4244 return {L.Flavor, SPNB_NA, false};
4245 }
4246 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4247 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4248 if (D == A) {
4249 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4250 match(B, m_Not(m_Specific(CmpRHS)))))
4251 return {L.Flavor, SPNB_NA, false};
4252 }
4253 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4254 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4255 if (C == A) {
4256 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4257 match(B, m_Not(m_Specific(CmpRHS)))))
4258 return {L.Flavor, SPNB_NA, false};
4259 }
4260
4261 return {SPF_UNKNOWN, SPNB_NA, false};
4262 }
4263
4264 /// Match non-obvious integer minimum and maximum sequences.
4265 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4266 Value *CmpLHS, Value *CmpRHS,
4267 Value *TrueVal, Value *FalseVal,
4268 Value *&LHS, Value *&RHS,
4269 unsigned Depth) {
4270 // Assume success. If there's no match, callers should not use these anyway.
4271 LHS = TrueVal;
4272 RHS = FalseVal;
4273
4274 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4275 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4276 return SPR;
4277
4278 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4279 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4280 return SPR;
4281
4282 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4283 return {SPF_UNKNOWN, SPNB_NA, false};
4284
4285 // Z = X -nsw Y
4286 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4287 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4288 if (match(TrueVal, m_Zero()) &&
4289 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4290 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4291
4292 // Z = X -nsw Y
4293 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4294 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4295 if (match(FalseVal, m_Zero()) &&
4296 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4297 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4298
4299 const APInt *C1;
4300 if (!match(CmpRHS, m_APInt(C1)))
4301 return {SPF_UNKNOWN, SPNB_NA, false};
4302
4303 // An unsigned min/max can be written with a signed compare.
4304 const APInt *C2;
4305 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4306 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4307 // Is the sign bit set?
4308 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4309 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4310 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4311 C2->isMaxSignedValue())
4312 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4313
4314 // Is the sign bit clear?
4315 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4316 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4317 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4318 C2->isMinSignedValue())
4319 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4320 }
4321
4322 // Look through 'not' ops to find disguised signed min/max.
4323 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4324 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4325 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4326 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4327 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4328
4329 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4330 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4331 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4332 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4333 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4334
4335 return {SPF_UNKNOWN, SPNB_NA, false};
4336 }
4337
4338 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4339 FastMathFlags FMF,
4340 Value *CmpLHS, Value *CmpRHS,
4341 Value *TrueVal, Value *FalseVal,
4342 Value *&LHS, Value *&RHS,
4343 unsigned Depth) {
4344 LHS = CmpLHS;
4345 RHS = CmpRHS;
4346
4347 // Comparisons involving signed zero may give inconsistent results between
4348 // implementations:
4349 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4350 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4351 // Therefore, we behave conservatively and only proceed if at least one of the operands is known to not be zero or if we don't care about signed zero.
4352 switch (Pred) {
4353 default: break;
4354 // FIXME: Include OGT/OLT/UGT/ULT.
4355 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4356 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4357 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4358 !isKnownNonZero(CmpRHS))
4359 return {SPF_UNKNOWN, SPNB_NA, false};
4360 }
4361
4362 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4363 bool Ordered = false;
4364
4365 // When given one NaN and one non-NaN input:
4366 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4367 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4368 // ordered comparison fails), which could be NaN or non-NaN.
4369 // So here we discover exactly what NaN behavior is required/accepted.
4370 if (CmpInst::isFPPredicate(Pred)) {
4371 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4372 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4373
4374 if (LHSSafe && RHSSafe) {
4375 // Both operands are known non-NaN.
4376 NaNBehavior = SPNB_RETURNS_ANY;
4377 } else if (CmpInst::isOrdered(Pred)) {
4378 // An ordered comparison will return false when given a NaN, so it
4379 // returns the RHS.
4380 Ordered = true;
4381 if (LHSSafe)
4382 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4383 NaNBehavior = SPNB_RETURNS_NAN;
4384 else if (RHSSafe)
4385 NaNBehavior = SPNB_RETURNS_OTHER;
4386 else
4387 // Completely unsafe.
4388 return {SPF_UNKNOWN, SPNB_NA, false};
4389 } else {
4390 Ordered = false;
4391 // An unordered comparison will return true when given a NaN, so it
4392 // returns the LHS.
4393 if (LHSSafe)
4394 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4395 NaNBehavior = SPNB_RETURNS_OTHER;
4396 else if (RHSSafe)
4397 NaNBehavior = SPNB_RETURNS_NAN;
4398 else
4399 // Completely unsafe.
4400 return {SPF_UNKNOWN, SPNB_NA, false};
4401 }
4402 }
4403
4404 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4405 std::swap(CmpLHS, CmpRHS);
4406 Pred = CmpInst::getSwappedPredicate(Pred);
4407 if (NaNBehavior == SPNB_RETURNS_NAN)
4408 NaNBehavior = SPNB_RETURNS_OTHER;
4409 else if (NaNBehavior == SPNB_RETURNS_OTHER)
4410 NaNBehavior = SPNB_RETURNS_NAN;
4411 Ordered = !Ordered;
4412 }
4413
4414 // ([if]cmp X, Y) ? X : Y
4415 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4416 switch (Pred) {
4417 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4418 case ICmpInst::ICMP_UGT:
4419 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4420 case ICmpInst::ICMP_SGT:
4421 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4422 case ICmpInst::ICMP_ULT:
4423 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4424 case ICmpInst::ICMP_SLT:
4425 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4426 case FCmpInst::FCMP_UGT:
4427 case FCmpInst::FCMP_UGE:
4428 case FCmpInst::FCMP_OGT:
4429 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4430 case FCmpInst::FCMP_ULT:
4431 case FCmpInst::FCMP_ULE:
4432 case FCmpInst::FCMP_OLT:
4433 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4434 }
4435 }
4436
4437 const APInt *C1;
4438 if (match(CmpRHS, m_APInt(C1))) {
4439 if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
4440 (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
4441
4442 // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
4443 // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
4444 if (Pred == ICmpInst::ICMP_SGT &&
4445 (C1->isNullValue() || C1->isAllOnesValue())) {
4446 return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4447 }
4448
4449 // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
4450 // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
4451 if (Pred == ICmpInst::ICMP_SLT &&
4452 (C1->isNullValue() || C1->isOneValue())) {
4453 return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4454 }
4455 }
4456 }
4457
4458 if (CmpInst::isIntPredicate(Pred))
4459 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4460
4461 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4462 // may return either -0.0 or 0.0, so the fcmp/select pair has stricter
4463 // semantics than minNum. Be conservative in such cases.
4464 if (NaNBehavior != SPNB_RETURNS_ANY ||
4465 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4466 !isKnownNonZero(CmpRHS)))
4467 return {SPF_UNKNOWN, SPNB_NA, false};
4468
4469 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4470 }
4471
4472 /// Helps to match a select pattern in case of a type mismatch.
4473 ///
4474 /// The function processes the case when the types of the true and false
4475 /// values of a select instruction differ from the type of the cmp instruction
4476 /// operands because of a cast instruction. The function checks if it is legal
4477 /// to move the cast operation after the "select". If yes, it returns the new
4478 /// second value of the "select" (with the assumption that the cast is moved):
4479 /// 1. As the operand of the cast instruction when both values of the "select"
4480 /// are the same cast instruction.
4481 /// 2. As a restored constant (by applying the reverse cast operation) when the
4482 /// first value of the "select" is a cast operation and the second value is a
4483 /// constant.
4484 /// NOTE: We return only the new second value because the first value could be
4485 /// accessed as the operand of the cast instruction.
4486 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4487 Instruction::CastOps *CastOp) {
4488 auto *Cast1 = dyn_cast<CastInst>(V1);
4489 if (!Cast1)
4490 return nullptr;
4491
4492 *CastOp = Cast1->getOpcode();
4493 Type *SrcTy = Cast1->getSrcTy();
4494 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4495 // If V1 and V2 are both the same cast from the same type, look through V1.
4496 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4497 return Cast2->getOperand(0);
4498 return nullptr;
4499 }
4500
4501 auto *C = dyn_cast<Constant>(V2);
4502 if (!C)
4503 return nullptr;
4504
4505 Constant *CastedTo = nullptr;
4506 switch (*CastOp) {
4507 case Instruction::ZExt:
4508 if (CmpI->isUnsigned())
4509 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4510 break;
4511 case Instruction::SExt:
4512 if (CmpI->isSigned())
4513 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4514 break;
4515 case Instruction::Trunc:
4516 Constant *CmpConst;
4517 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4518 CmpConst->getType() == SrcTy) {
4519 // Here we have the following case:
4520 //
4521 // %cond = cmp iN %x, CmpConst
4522 // %tr = trunc iN %x to iK
4523 // %narrowsel = select i1 %cond, iK %tr, iK C
4524 //
4525 // We can always move the trunc after the select operation:
4526 //
4527 // %cond = cmp iN %x, CmpConst
4528 // %widesel = select i1 %cond, iN %x, iN CmpConst
4529 // %tr = trunc iN %widesel to iK
4530 //
4531 // Note that C could be extended in any way because we don't care about
4532 // upper bits after truncation. It can't be an abs pattern, because that
4533 // would look like:
4534 //
4535 // select i1 %cond, x, -x.
4536 //
4537 // So only a min/max pattern can be matched. Such a match requires the
4538 // widened C to equal CmpConst, so we set the widened C to CmpConst; the
4539 // condition trunc(CmpConst) == C is checked below.
4540 CastedTo = CmpConst;
4541 } else {
4542 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4543 }
4544 break;
4545 case Instruction::FPTrunc:
4546 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4547 break;
4548 case Instruction::FPExt:
4549 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4550 break;
4551 case Instruction::FPToUI:
4552 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4553 break;
4554 case Instruction::FPToSI:
4555 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4556 break;
4557 case Instruction::UIToFP:
4558 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4559 break;
4560 case Instruction::SIToFP:
4561 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4562 break;
4563 default:
4564 break;
4565 }
4566
4567 if (!CastedTo)
4568 return nullptr;
4569
4570 // Make sure the cast doesn't lose any information.
4571 Constant *CastedBack =
4572 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4573 if (CastedBack != C)
4574 return nullptr;
4575
4576 return CastedTo;
4577 }
4578
4579 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4580 Instruction::CastOps *CastOp,
4581 unsigned Depth) {
4582 if (Depth >= MaxDepth)
4583 return {SPF_UNKNOWN, SPNB_NA, false};
4584
4585 SelectInst *SI = dyn_cast<SelectInst>(V);
4586 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4587
4588 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4589 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4590
4591 CmpInst::Predicate Pred = CmpI->getPredicate();
4592 Value *CmpLHS = CmpI->getOperand(0);
4593 Value *CmpRHS = CmpI->getOperand(1);
4594 Value *TrueVal = SI->getTrueValue();
4595 Value *FalseVal = SI->getFalseValue();
4596 FastMathFlags FMF;
4597 if (isa<FPMathOperator>(CmpI))
4598 FMF = CmpI->getFastMathFlags();
4599
4600 // Bail out early.
4601 if (CmpI->isEquality())
4602 return {SPF_UNKNOWN, SPNB_NA, false};
4603
4604 // Deal with type mismatches.
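// For example (illustrative IR):
//   %cmp = icmp sgt i32 %x, 0
//   %ext = sext i32 %x to i64
//   %sel = select i1 %cmp, i64 %ext, i64 0
// The compare is on i32 while the select is on i64; lookThroughCast hands
// back the i32 operands (%x and a re-truncated 0) so the matching below can
// proceed on the narrow type.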
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}

/// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB).
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0, then (X | C) == X +_{nuw} C.
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
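/// For example (an illustrative sketch): knowing "x s< y" lets us conclude
/// "x s< (y +nsw 1)", because "x s<= x" holds trivially and
/// "y s<= (y +nsw 1)" holds by isTruePredicate (a non-negative nsw addend).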
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is
/// true when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
/// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    const Value *ALHS,
                                                    const Value *ARHS,
                                                    CmpInst::Predicate BPred,
                                                    const Value *BLHS,
                                                    const Value *BRHS,
                                                    bool IsSwappedOps) {
  // Canonicalize the operands so they're matching.
  if (IsSwappedOps) {
    std::swap(BLHS, BRHS);
    BPred = ICmpInst::getSwappedPredicate(BPred);
  }
  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
/// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false. Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const Value *BLHS, const ConstantInt *C2) {
  assert(ALHS == BLHS && "LHS operands must match.");
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);

  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
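  // For example, if the condition "x s< y" is known to be false, the checks
  // below may reason as if "x s>= y" were known to be true.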
  ICmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  Value *BLHS = RHS->getOperand(0);
  Value *BRHS = RHS->getOperand(1);
  ICmpInst::Predicate BPred = RHS->getPredicate();

  // Can we infer anything when the two compares have matching operands?
  bool IsSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // exit early.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
            cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // exit early.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication =
            isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication =
            isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
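  // As an illustrative sketch (hypothetical IR): if LHS is
  // "%a = icmp ult i32 %x, 8" and RHS is "%b = icmp ult i32 %x, 16", then
  // isImpliedCondICmps can conclude via the constant-range check that %a
  // being true implies %b is true, since [0, 8) is a subset of [0, 16).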
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (LHSCmp && RHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
  // an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO && RHSCmp) {
    if (LHSBO->getOpcode() == Instruction::And ||
        LHSBO->getOpcode() == Instruction::Or)
      return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
  }
  return None;
}