//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}
namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}
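// An IR sketch of the mutual-recursion hazard the Excluded set guards
// against (%x and %y are hypothetical values):
//   %c = icmp eq i32 %x, %y
//   call void @llvm.assume(i1 %c)
// Computing known bits of %x consults this assume, which queries the known
// bits of %y, which consults the same assume about %x again; recording the
// assume in Excluded breaks that cycle.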
static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE) {
  return ::computeKnownBits(V, Depth,
                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}
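// An IR sketch of the inverted-mask case above (%x, %y, %m are arbitrary
// i32 values):
//   %notm = xor i32 %m, -1
//   %lhs  = and i32 %x, %notm   ; bits set only where %m is 0
//   %rhs  = and i32 %y, %m      ; bits set only where %m is 1
// %lhs and %rhs can never both have the same bit set, so a caller may, for
// example, treat %lhs + %rhs as %lhs | %rhs.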
bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}
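// A worked sketch for computeKnownBitsAddSub: if Op0 is known to be a
// multiple of 4 (its low two bits are zero) and Op1 is the constant 3, then
// for an add the low two bits of the result are known to be 1, since adding
// binary 11 to ...00 produces ...11 with no carry into bit 2; the unknown
// high bits of Op0 remain unknown in the sum.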
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                            Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  // -------
  //    XX11
  //   XX11
  //  XX11
  // XX11
  // -------
  // XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  // =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}
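// A worked sketch of the conservative leading-zero estimate above: for i8
// operands with a < 16 (at least four leading zeros) and b < 8 (at least
// five), the product is at most 15 * 7 = 105 < 128, so
// max(4 + 5, 8) - 8 = 1 leading zero is guaranteed.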
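// A worked sketch of the common-prefix logic below: for !range !{i8 64, i8 68}
// the possible values are 64..67 (0b010000xx), so
// getUnsignedMax() ^ getUnsignedMin() == 67 ^ 64 == 0b11 has six leading
// zeros, and the top six bits of the loaded value are known to be 010000.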
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                                   return EphValues.count(U);
                                 })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
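// An IR sketch of restriction 1 (hypothetical values):
//   call void @llvm.assume(i1 %c)   ; Inv
//   %y = add i32 %x, 1              ; CxtI: valid, the assume comes first
// If instead CxtI preceded the assume with something that might interrupt
// control flow in between (e.g. a call that may throw), execution reaching
// CxtI would not guarantee reaching the assume, so it could not be used.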
static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One  |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One  |= RHSKnown.One  & MaskKnown.One;
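      // A worked sketch: from assume((v & 12) == 4), MaskKnown.One has bits
      // 2 and 3 set, and the RHS says bit 2 is one and bit 3 is zero, so we
      // learn that v's bit 2 is one and bit 3 is zero; bits outside the mask
      // remain unknown.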
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One  & MaskKnown.One;
      Known.One  |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One  |= RHSKnown.One  & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One  & BKnown.Zero;
      Known.One  |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One  |= RHSKnown.One  & BKnown.Zero;
      Known.Zero |= RHSKnown.One  & BKnown.One;
      Known.One  |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One  & BKnown.Zero;
      Known.One  |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One  |= RHSKnown.One  & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One  |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One  |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One  |= RHSKnown.One  << C;
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.One  << C;
      Known.One  |= RHSKnown.Zero << C;
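      // A worked sketch of the shift cases: from assume((v << 4) == 32) on
      // an i8 value, the RHS pins every bit of v << 4, so shifting those
      // known bits right by 4 pins v's low nibble to 0b0010; v's high four
      // bits were shifted out and stay unknown.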
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
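    // A worked sketch of the ULT power-of-two refinement: from
    // assume(v <u 16) on an i8 value, 16 has three leading zeros, and since
    // 16 is a power of two v is at most 15, so one extra bit is known: the
    // top four bits of v are zero.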
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One  = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }
  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One  &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
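// A sketch of the loop above: for an i8 shift whose amount is known to be
// even (bit 0 of the shift operand is known zero), ShiftAmtKZ has bit 0 set,
// so only shift amounts 0, 2, 4, and 6 survive the filters and contribute to
// the conservative intersection; if the shift operand is also known nonzero,
// amount 0 is skipped as well.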
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
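  // A worked sketch of the UDiv bound: on i8, if the numerator has two known
  // leading zeros (it is < 64) and the divisor is known to have bit 3 set
  // (it is at least 8), the quotient is < 64/8 = 8, and the code derives
  // min(8, 2 + 8 - 4 - 1) = 5 known leading zeros.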
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
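  // A worked sketch of the Shl lambdas: if X's low two bits are known zero
  // and the shift amount is the constant 3, KZF shifts the known-zero mask
  // left by 3 and sets the three freshly vacated low bits, so the result is
  // known to have (at least) five zero low bits.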
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
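  // A worked sketch of the SRem power-of-two path: for srem i32 %x, 8 with
  // %x known non-negative, the result equals %x & 7, so the low three bits
  // are copied from %x and every bit above bit 2 is known zero.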
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
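  // A worked sketch of the GEP case: for
  //   %p = getelementptr i32, i32* %base, i64 %i
  // with %base known 16-byte aligned (four trailing zero bits) and %i
  // unknown, the array stride is 4 bytes, so the offset contributes two
  // trailing zeros and %p keeps min(4, 2) = 2 known-zero low bits.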
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If the initial value of the recurrence is non-negative and we
            // are adding a non-negative number with nsw, the result can only
            // be non-negative or a poison value, regardless of the number of
            // times we execute the add in the recurrence. If the initial
            // value is negative and we are adding a negative number with nsw,
            // the result can only be negative or a poison value. Similar
            // arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value references itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
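  // A worked sketch of the recurrence handling above:
  //   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  //   %iv.next = add nsw i32 %iv, 4
  // Both the start value and the step have at least two trailing zeros, so
  // %iv is known to be a multiple of 4, and since both are non-negative the
  // nsw add keeps %iv non-negative (or poison).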
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      }
    }
    break;
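  // A worked sketch of the ctpop bound: if at most five bits of the operand
  // can possibly be set, the population count is at most 5, which fits in
  // Log2_32(5) + 1 = 3 bits, so every result bit from bit 3 upward is known
  // zero.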
1503 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1504 break;
1505 case Instruction::ExtractValue:
1506 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1507 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1508 if (EVI->getNumIndices() != 1) break;
1509 if (EVI->getIndices()[0] == 0) {
1510 switch (II->getIntrinsicID()) {
1511 default: break;
1512 case Intrinsic::uadd_with_overflow:
1513 case Intrinsic::sadd_with_overflow:
1514 computeKnownBitsAddSub(true, II->getArgOperand(0),
1515 II->getArgOperand(1), false, Known, Known2,
1516 Depth, Q);
1517 break;
1518 case Intrinsic::usub_with_overflow:
1519 case Intrinsic::ssub_with_overflow:
1520 computeKnownBitsAddSub(false, II->getArgOperand(0),
1521 II->getArgOperand(1), false, Known, Known2,
1522 Depth, Q);
1523 break;
1524 case Intrinsic::umul_with_overflow:
1525 case Intrinsic::smul_with_overflow:
1526 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1527 Known, Known2, Depth, Q);
1528 break;
1529 }
1530 }
1531 }
1532 }
1533 }
1534
1535 /// Determine which bits of V are known to be either zero or one and return
1536 /// them.
1537 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1538 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1539 computeKnownBits(V, Known, Depth, Q);
1540 return Known;
1541 }
1542
1543 /// Determine which bits of V are known to be either zero or one and return
1544 /// them in the Known bit set.
1545 ///
1546 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1547 /// we cannot optimize based on the assumption that it is zero without changing
1548 /// it to be an explicit zero. If we don't change it to zero, other code could
1549 /// be optimized based on the contradictory assumption that it is non-zero.
1550 /// Because instcombine aggressively folds operations with undef args anyway,
1551 /// this won't lose us code quality.
1552 ///
1553 /// This function is defined on values with integer type, values with pointer
1554 /// type, and vectors of integers. In the case where V is a vector, the known
1555 /// zero and known one values are the same width as the vector element, and a
1556 /// bit is set only if it is true for all of the elements in the
1557 /// vector.
1558 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1559 const Query &Q) {
1560 assert(V && "No Value?");
1561 assert(Depth <= MaxDepth && "Limit Search Depth");
1562 unsigned BitWidth = Known.getBitWidth();
1563
1564 assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1565 V->getType()->isPtrOrPtrVectorTy()) &&
1566 "Not integer or pointer type!");
1567
1568 Type *ScalarTy = V->getType()->getScalarType();
1569 unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1570 Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1571 assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1572 (void)BitWidth;
1573 (void)ExpectedWidth;
1574
1575 const APInt *C;
1576 if (match(V, m_APInt(C))) {
1577 // We know all of the bits for a scalar constant or a splat vector constant!
1578 Known.One = *C;
1579 Known.Zero = ~Known.One;
1580 return;
1581 }
1582 // Null and aggregate-zero are all-zeros.
1583 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1584 Known.setAllZero();
1585 return;
1586 }
1587 // Handle a constant vector by taking the intersection of the known bits of
1588 // each element.
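// E.g. (illustrative): for <2 x i8> <i8 4, i8 6>, the elements 0b100 and
// 0b110 agree that bit 2 is one and bits 0 and 3-7 are zero; bit 1 differs
// between them, so it stays unknown (clear in both Known.Zero and Known.One).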
1589 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1590 // We know that CDS must be a vector of integers. Take the intersection of
1591 // each element.
1592 Known.Zero.setAllBits(); Known.One.setAllBits();
1593 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1594 APInt Elt = CDS->getElementAsAPInt(i);
1595 Known.Zero &= ~Elt;
1596 Known.One &= Elt;
1597 }
1598 return;
1599 }
1600
1601 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1602 // We know that CV must be a vector of integers. Take the intersection of
1603 // each element.
1604 Known.Zero.setAllBits(); Known.One.setAllBits();
1605 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1606 Constant *Element = CV->getAggregateElement(i);
1607 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1608 if (!ElementCI) {
1609 Known.resetAll();
1610 return;
1611 }
1612 const APInt &Elt = ElementCI->getValue();
1613 Known.Zero &= ~Elt;
1614 Known.One &= Elt;
1615 }
1616 return;
1617 }
1618
1619 // Start out not knowing anything.
1620 Known.resetAll();
1621
1622 // We can't imply anything about undefs.
1623 if (isa<UndefValue>(V))
1624 return;
1625
1626 // There's no point in looking through other users of ConstantData for
1627 // assumptions. Confirm that we've handled them all.
1628 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1629
1630 // Limit search depth.
1631 // All recursive calls that increase depth must come after this.
1632 if (Depth == MaxDepth)
1633 return;
1634
1635 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1636 // the bits of its aliasee.
1637 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1638 if (!GA->isInterposable())
1639 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1640 return;
1641 }
1642
1643 if (const Operator *I = dyn_cast<Operator>(V))
1644 computeKnownBitsFromOperator(I, Known, Depth, Q);
1645
1646 // Aligned pointers have trailing zeros - refine the Known.Zero set.
1647 if (V->getType()->isPointerTy()) {
1648 unsigned Align = V->getPointerAlignment(Q.DL);
1649 if (Align)
1650 Known.Zero.setLowBits(countTrailingZeros(Align));
1651 }
1652
1653 // computeKnownBitsFromAssume strictly refines Known.
1654 // Therefore, we run it after computeKnownBitsFromOperator.
1655
1656 // Check whether a nearby assume intrinsic can determine some known bits.
1657 computeKnownBitsFromAssume(V, Known, Depth, Q);
1658
1659 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1660 }
1661
1662 /// Return true if the given value is known to have exactly one
1663 /// bit set when defined. For vectors return true if every element is known to
1664 /// be a power of two when defined. Supports values with integer or pointer
1665 /// types and vectors of integers.
1666 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1667 const Query &Q) {
1668 assert(Depth <= MaxDepth && "Limit Search Depth");
1669
1670 // Attempt to match against constants.
1671 if (OrZero && match(V, m_Power2OrZero()))
1672 return true;
1673 if (match(V, m_Power2()))
1674 return true;
1675
1676 // 1 << X is clearly a power of two if the one is not shifted off the end. If
1677 // it is shifted off the end then the result is undefined.
1678 if (match(V, m_Shl(m_One(), m_Value())))
1679 return true;
1680
1681 // (signmask) >>l X is clearly a power of two if the one is not shifted off
1682 // the bottom. If it is shifted off the bottom then the result is undefined.
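// E.g. (illustrative, i8): 0x80 lshr 3 == 0x10, still a single set bit;
// shifting the one bit past bit 0 would require a shift amount of 8 or
// more, which is already undefined for the lshr itself.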
1683 if (match(V, m_LShr(m_SignMask(), m_Value()))) 1684 return true; 1685 1686 // The remaining tests are all recursive, so bail out if we hit the limit. 1687 if (Depth++ == MaxDepth) 1688 return false; 1689 1690 Value *X = nullptr, *Y = nullptr; 1691 // A shift left or a logical shift right of a power of two is a power of two 1692 // or zero. 1693 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 1694 match(V, m_LShr(m_Value(X), m_Value())))) 1695 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 1696 1697 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 1698 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 1699 1700 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 1701 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 1702 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 1703 1704 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 1705 // A power of two and'd with anything is a power of two or zero. 1706 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 1707 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 1708 return true; 1709 // X & (-X) is always a power of two or zero. 1710 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 1711 return true; 1712 return false; 1713 } 1714 1715 // Adding a power-of-two or zero to the same power-of-two or zero yields 1716 // either the original power-of-two, a larger power-of-two or zero. 1717 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1718 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 1719 if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) { 1720 if (match(X, m_And(m_Specific(Y), m_Value())) || 1721 match(X, m_And(m_Value(), m_Specific(Y)))) 1722 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 1723 return true; 1724 if (match(Y, m_And(m_Specific(X), m_Value())) || 1725 match(Y, m_And(m_Value(), m_Specific(X)))) 1726 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 1727 return true; 1728 1729 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 1730 KnownBits LHSBits(BitWidth); 1731 computeKnownBits(X, LHSBits, Depth, Q); 1732 1733 KnownBits RHSBits(BitWidth); 1734 computeKnownBits(Y, RHSBits, Depth, Q); 1735 // If i8 V is a power of two or zero: 1736 // ZeroBits: 1 1 1 0 1 1 1 1 1737 // ~ZeroBits: 0 0 0 1 0 0 0 0 1738 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) 1739 // If OrZero isn't set, we cannot give back a zero result. 1740 // Make sure either the LHS or RHS has a bit set. 1741 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) 1742 return true; 1743 } 1744 } 1745 1746 // An exact divide or right shift can only shift off zero bits, so the result 1747 // is a power of two only if the first operand is a power of two and not 1748 // copying a sign bit (sdiv int_min, 2). 1749 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 1750 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 1751 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 1752 Depth, Q); 1753 } 1754 1755 return false; 1756 } 1757 1758 /// Test whether a GEP's result is known to be non-null. 1759 /// 1760 /// Uses properties inherent in a GEP to try to determine whether it is known 1761 /// to be non-null. 1762 /// 1763 /// Currently this routine does not support vector GEPs. 
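/// For illustration (hypothetical IR, assuming a typical data layout): in
/// address space 0,
///   %p = getelementptr inbounds {i32, i32}, {i32, i32}* %b, i64 0, i32 1
/// is known non-null even when %b is not, because the inbounds GEP adds a
/// non-zero offset (4 bytes) that cannot legally land on null.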
1764 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, 1765 const Query &Q) { 1766 if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0) 1767 return false; 1768 1769 // FIXME: Support vector-GEPs. 1770 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 1771 1772 // If the base pointer is non-null, we cannot walk to a null address with an 1773 // inbounds GEP in address space zero. 1774 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) 1775 return true; 1776 1777 // Walk the GEP operands and see if any operand introduces a non-zero offset. 1778 // If so, then the GEP cannot produce a null pointer, as doing so would 1779 // inherently violate the inbounds contract within address space zero. 1780 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 1781 GTI != GTE; ++GTI) { 1782 // Struct types are easy -- they must always be indexed by a constant. 1783 if (StructType *STy = GTI.getStructTypeOrNull()) { 1784 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); 1785 unsigned ElementIdx = OpC->getZExtValue(); 1786 const StructLayout *SL = Q.DL.getStructLayout(STy); 1787 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); 1788 if (ElementOffset > 0) 1789 return true; 1790 continue; 1791 } 1792 1793 // If we have a zero-sized type, the index doesn't matter. Keep looping. 1794 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0) 1795 continue; 1796 1797 // Fast path the constant operand case both for efficiency and so we don't 1798 // increment Depth when just zipping down an all-constant GEP. 1799 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { 1800 if (!OpC->isZero()) 1801 return true; 1802 continue; 1803 } 1804 1805 // We post-increment Depth here because while isKnownNonZero increments it 1806 // as well, when we pop back up that increment won't persist. We don't want 1807 // to recurse 10k times just because we have 10k GEP operands. We don't 1808 // bail completely out because we want to handle constant GEPs regardless 1809 // of depth. 1810 if (Depth++ >= MaxDepth) 1811 continue; 1812 1813 if (isKnownNonZero(GTI.getOperand(), Depth, Q)) 1814 return true; 1815 } 1816 1817 return false; 1818 } 1819 1820 static bool isKnownNonNullFromDominatingCondition(const Value *V, 1821 const Instruction *CtxI, 1822 const DominatorTree *DT) { 1823 assert(V->getType()->isPointerTy() && "V must be pointer type"); 1824 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull"); 1825 1826 if (!CtxI || !DT) 1827 return false; 1828 1829 unsigned NumUsesExplored = 0; 1830 for (auto *U : V->users()) { 1831 // Avoid massive lists 1832 if (NumUsesExplored >= DomConditionsMaxUses) 1833 break; 1834 NumUsesExplored++; 1835 1836 // If the value is used as an argument to a call or invoke, then argument 1837 // attributes may provide an answer about null-ness. 
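// For instance (an illustrative case): given a callee declared as
//   declare void @g(i8* nonnull)
// a dominating 'call void @g(i8* %p)' establishes that %p is non-null at
// CtxI.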
1838 if (auto CS = ImmutableCallSite(U))
1839 if (auto *CalledFunc = CS.getCalledFunction())
1840 for (const Argument &Arg : CalledFunc->args())
1841 if (CS.getArgOperand(Arg.getArgNo()) == V &&
1842 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1843 return true;
1844
1845 // Consider only compare instructions uniquely controlling a branch
1846 CmpInst::Predicate Pred;
1847 if (!match(const_cast<User *>(U),
1848 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1849 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1850 continue;
1851
1852 for (auto *CmpU : U->users()) {
1853 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
1854 assert(BI->isConditional() && "uses a comparison!");
1855
1856 BasicBlock *NonNullSuccessor =
1857 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1858 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1859 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1860 return true;
1861 } else if (Pred == ICmpInst::ICMP_NE &&
1862 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1863 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
1864 return true;
1865 }
1866 }
1867 }
1868
1869 return false;
1870 }
1871
1872 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1873 /// ensure that the value it's attached to is never equal to \p Value?
1874 /// (Each [Lo, Hi) pair in the list has the same type as that value.)
1875 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1876 const unsigned NumRanges = Ranges->getNumOperands() / 2;
1877 assert(NumRanges >= 1);
1878 for (unsigned i = 0; i < NumRanges; ++i) {
1879 ConstantInt *Lower =
1880 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1881 ConstantInt *Upper =
1882 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1883 ConstantRange Range(Lower->getValue(), Upper->getValue());
1884 if (Range.contains(Value))
1885 return false;
1886 }
1887 return true;
1888 }
1889
1890 /// Return true if the given value is known to be non-zero when defined. For
1891 /// vectors, return true if every element is known to be non-zero when
1892 /// defined. For pointers, if the context instruction and dominator tree are
1893 /// specified, perform context-sensitive analysis and return true if the
1894 /// pointer couldn't possibly be null at the specified instruction.
1895 /// Supports values with integer or pointer type and vectors of integers.
1896 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1897 if (auto *C = dyn_cast<Constant>(V)) {
1898 if (C->isNullValue())
1899 return false;
1900 if (isa<ConstantInt>(C))
1901 // Must be non-zero due to null test above.
1902 return true;
1903
1904 // For constant vectors, check that all elements are undefined or known
1905 // non-zero to determine that the whole vector is known non-zero.
1906 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1907 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1908 Constant *Elt = C->getAggregateElement(i);
1909 if (!Elt || Elt->isNullValue())
1910 return false;
1911 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1912 return false;
1913 }
1914 return true;
1915 }
1916
1917 // A global variable in address space 0 is non-null unless it is extern weak
1918 // or an absolute symbol reference. Other address spaces may have null as a
1919 // valid address for a global, so we can't assume anything.
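// (Illustrative: '@g = global i32 0' always has a fixed, non-null address,
// while '@w = extern_weak global i32' may resolve to null if the symbol is
// absent at link time.)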
1920 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { 1921 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && 1922 GV->getType()->getAddressSpace() == 0) 1923 return true; 1924 } else 1925 return false; 1926 } 1927 1928 if (auto *I = dyn_cast<Instruction>(V)) { 1929 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) { 1930 // If the possible ranges don't contain zero, then the value is 1931 // definitely non-zero. 1932 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { 1933 const APInt ZeroValue(Ty->getBitWidth(), 0); 1934 if (rangeMetadataExcludesValue(Ranges, ZeroValue)) 1935 return true; 1936 } 1937 } 1938 } 1939 1940 // Check for pointer simplifications. 1941 if (V->getType()->isPointerTy()) { 1942 // Alloca never returns null, malloc might. 1943 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0) 1944 return true; 1945 1946 // A byval, inalloca, or nonnull argument is never null. 1947 if (const Argument *A = dyn_cast<Argument>(V)) 1948 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr()) 1949 return true; 1950 1951 // A Load tagged with nonnull metadata is never null. 1952 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 1953 if (LI->getMetadata(LLVMContext::MD_nonnull)) 1954 return true; 1955 1956 if (auto CS = ImmutableCallSite(V)) 1957 if (CS.isReturnNonNull()) 1958 return true; 1959 } 1960 1961 // The remaining tests are all recursive, so bail out if we hit the limit. 1962 if (Depth++ >= MaxDepth) 1963 return false; 1964 1965 // Check for recursive pointer simplifications. 1966 if (V->getType()->isPointerTy()) { 1967 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) 1968 return true; 1969 1970 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 1971 if (isGEPKnownNonNull(GEP, Depth, Q)) 1972 return true; 1973 } 1974 1975 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); 1976 1977 // X | Y != 0 if X != 0 or Y != 0. 1978 Value *X = nullptr, *Y = nullptr; 1979 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 1980 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q); 1981 1982 // ext X != 0 if X != 0. 1983 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 1984 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); 1985 1986 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 1987 // if the lowest bit is shifted off the end. 1988 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) { 1989 // shl nuw can't remove any non-zero bits. 1990 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 1991 if (BO->hasNoUnsignedWrap()) 1992 return isKnownNonZero(X, Depth, Q); 1993 1994 KnownBits Known(BitWidth); 1995 computeKnownBits(X, Known, Depth, Q); 1996 if (Known.One[0]) 1997 return true; 1998 } 1999 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 2000 // defined if the sign bit is shifted off the end. 2001 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 2002 // shr exact can only shift out zero bits. 2003 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 2004 if (BO->isExact()) 2005 return isKnownNonZero(X, Depth, Q); 2006 2007 KnownBits Known = computeKnownBits(X, Depth, Q); 2008 if (Known.isNegative()) 2009 return true; 2010 2011 // If the shifter operand is a constant, and all of the bits shifted 2012 // out are known to be zero, and X is known non-zero then at least one 2013 // non-zero bit must remain. 
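// Worked example (illustrative, i8): if the low two bits of X are known
// zero and X != 0, then 'lshr X, 2' discards only known-zero bits, so at
// least one set bit must survive the shift.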
2014 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 2015 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 2016 // Is there a known one in the portion not shifted out? 2017 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal) 2018 return true; 2019 // Are all the bits to be shifted out known zero? 2020 if (Known.countMinTrailingZeros() >= ShiftVal) 2021 return isKnownNonZero(X, Depth, Q); 2022 } 2023 } 2024 // div exact can only produce a zero if the dividend is zero. 2025 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 2026 return isKnownNonZero(X, Depth, Q); 2027 } 2028 // X + Y. 2029 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2030 KnownBits XKnown = computeKnownBits(X, Depth, Q); 2031 KnownBits YKnown = computeKnownBits(Y, Depth, Q); 2032 2033 // If X and Y are both non-negative (as signed values) then their sum is not 2034 // zero unless both X and Y are zero. 2035 if (XKnown.isNonNegative() && YKnown.isNonNegative()) 2036 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q)) 2037 return true; 2038 2039 // If X and Y are both negative (as signed values) then their sum is not 2040 // zero unless both X and Y equal INT_MIN. 2041 if (XKnown.isNegative() && YKnown.isNegative()) { 2042 APInt Mask = APInt::getSignedMaxValue(BitWidth); 2043 // The sign bit of X is set. If some other bit is set then X is not equal 2044 // to INT_MIN. 2045 if (XKnown.One.intersects(Mask)) 2046 return true; 2047 // The sign bit of Y is set. If some other bit is set then Y is not equal 2048 // to INT_MIN. 2049 if (YKnown.One.intersects(Mask)) 2050 return true; 2051 } 2052 2053 // The sum of a non-negative number and a power of two is not zero. 2054 if (XKnown.isNonNegative() && 2055 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) 2056 return true; 2057 if (YKnown.isNonNegative() && 2058 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) 2059 return true; 2060 } 2061 // X * Y. 2062 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 2063 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2064 // If X and Y are non-zero then so is X * Y as long as the multiplication 2065 // does not overflow. 2066 if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) && 2067 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q)) 2068 return true; 2069 } 2070 // (C ? X : Y) != 0 if X != 0 and Y != 0. 2071 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 2072 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) && 2073 isKnownNonZero(SI->getFalseValue(), Depth, Q)) 2074 return true; 2075 } 2076 // PHI 2077 else if (const PHINode *PN = dyn_cast<PHINode>(V)) { 2078 // Try and detect a recurrence that monotonically increases from a 2079 // starting value, as these are common as induction variables. 2080 if (PN->getNumIncomingValues() == 2) { 2081 Value *Start = PN->getIncomingValue(0); 2082 Value *Induction = PN->getIncomingValue(1); 2083 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start)) 2084 std::swap(Start, Induction); 2085 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) { 2086 if (!C->isZero() && !C->isNegative()) { 2087 ConstantInt *X; 2088 if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) || 2089 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) && 2090 !X->isNegative()) 2091 return true; 2092 } 2093 } 2094 } 2095 // Check if all incoming values are non-zero constant. 
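// E.g. (illustrative): %p = phi i32 [ 1, %bb0 ], [ 7, %bb1 ] is trivially
// non-zero because every incoming value is a non-zero constant.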
2096 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) { 2097 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero(); 2098 }); 2099 if (AllNonZeroConstants) 2100 return true; 2101 } 2102 2103 KnownBits Known(BitWidth); 2104 computeKnownBits(V, Known, Depth, Q); 2105 return Known.One != 0; 2106 } 2107 2108 /// Return true if V2 == V1 + X, where X is known non-zero. 2109 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { 2110 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 2111 if (!BO || BO->getOpcode() != Instruction::Add) 2112 return false; 2113 Value *Op = nullptr; 2114 if (V2 == BO->getOperand(0)) 2115 Op = BO->getOperand(1); 2116 else if (V2 == BO->getOperand(1)) 2117 Op = BO->getOperand(0); 2118 else 2119 return false; 2120 return isKnownNonZero(Op, 0, Q); 2121 } 2122 2123 /// Return true if it is known that V1 != V2. 2124 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) { 2125 if (V1 == V2) 2126 return false; 2127 if (V1->getType() != V2->getType()) 2128 // We can't look through casts yet. 2129 return false; 2130 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q)) 2131 return true; 2132 2133 if (V1->getType()->isIntOrIntVectorTy()) { 2134 // Are any known bits in V1 contradictory to known bits in V2? If V1 2135 // has a known zero where V2 has a known one, they must not be equal. 2136 KnownBits Known1 = computeKnownBits(V1, 0, Q); 2137 KnownBits Known2 = computeKnownBits(V2, 0, Q); 2138 2139 if (Known1.Zero.intersects(Known2.One) || 2140 Known2.Zero.intersects(Known1.One)) 2141 return true; 2142 } 2143 return false; 2144 } 2145 2146 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 2147 /// simplify operations downstream. Mask is known to be zero for bits that V 2148 /// cannot have. 2149 /// 2150 /// This function is defined on values with integer type, values with pointer 2151 /// type, and vectors of integers. In the case 2152 /// where V is a vector, the mask, known zero, and known one values are the 2153 /// same width as the vector element, and the bit is set only if it is true 2154 /// for all of the elements in the vector. 2155 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, 2156 const Query &Q) { 2157 KnownBits Known(Mask.getBitWidth()); 2158 computeKnownBits(V, Known, Depth, Q); 2159 return Mask.isSubsetOf(Known.Zero); 2160 } 2161 2162 /// For vector constants, loop over the elements and find the constant with the 2163 /// minimum number of sign bits. Return 0 if the value is not a vector constant 2164 /// or if any element was not analyzed; otherwise, return the count for the 2165 /// element with the minimum number of sign bits. 2166 static unsigned computeNumSignBitsVectorConstant(const Value *V, 2167 unsigned TyBits) { 2168 const auto *CV = dyn_cast<Constant>(V); 2169 if (!CV || !CV->getType()->isVectorTy()) 2170 return 0; 2171 2172 unsigned MinSignBits = TyBits; 2173 unsigned NumElts = CV->getType()->getVectorNumElements(); 2174 for (unsigned i = 0; i != NumElts; ++i) { 2175 // If we find a non-ConstantInt, bail out. 
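// E.g. (illustrative, <2 x i8>): <i8 -1, i8 3> has 8 sign bits in lane 0
// (0b11111111) but only 6 in lane 1 (0b00000011), so the result is 6.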
2176 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2177 if (!Elt)
2178 return 0;
2179
2180 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2181 }
2182
2183 return MinSignBits;
2184 }
2185
2186 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2187 const Query &Q);
2188
2189 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2190 const Query &Q) {
2191 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2192 assert(Result > 0 && "At least one sign bit needs to be present!");
2193 return Result;
2194 }
2195
2196 /// Return the number of times the sign bit of the register is replicated into
2197 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2198 /// (itself), but other cases can give us information. For example, immediately
2199 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2200 /// other, so we return 3. For vectors, return the number of sign bits for the
2201 /// vector element with the minimum number of known sign bits.
2202 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2203 const Query &Q) {
2204 assert(Depth <= MaxDepth && "Limit Search Depth");
2205
2206 // We return the minimum number of sign bits that are guaranteed to be present
2207 // in V, so for undef we have to conservatively return 1. We don't have the
2208 // same behavior for poison though -- that's a FIXME today.
2209
2210 Type *ScalarTy = V->getType()->getScalarType();
2211 unsigned TyBits = ScalarTy->isPointerTy() ?
2212 Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2213 Q.DL.getTypeSizeInBits(ScalarTy);
2214
2215 unsigned Tmp, Tmp2;
2216 unsigned FirstAnswer = 1;
2217
2218 // Note that ConstantInt is handled by the general computeKnownBits case
2219 // below.
2220
2221 if (Depth == MaxDepth)
2222 return 1; // Limit search depth.
2223
2224 const Operator *U = dyn_cast<Operator>(V);
2225 switch (Operator::getOpcode(V)) {
2226 default: break;
2227 case Instruction::SExt:
2228 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2229 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2230
2231 case Instruction::SDiv: {
2232 const APInt *Denominator;
2233 // sdiv X, C -> adds log(C) sign bits.
2234 if (match(U->getOperand(1), m_APInt(Denominator))) {
2235
2236 // Ignore non-positive denominator.
2237 if (!Denominator->isStrictlyPositive())
2238 break;
2239
2240 // Calculate the incoming numerator bits.
2241 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2242
2243 // Add floor(log(C)) bits to the numerator bits.
2244 return std::min(TyBits, NumBits + Denominator->logBase2());
2245 }
2246 break;
2247 }
2248
2249 case Instruction::SRem: {
2250 const APInt *Denominator;
2251 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2252 // positive constant. This lets us put a lower bound on the number of sign
2253 // bits.
2254 if (match(U->getOperand(1), m_APInt(Denominator))) {
2255
2256 // Ignore non-positive denominator.
2257 if (!Denominator->isStrictlyPositive())
2258 break;
2259
2260 // Calculate the incoming numerator bits. SRem by a positive constant
2261 // can't lower the number of sign bits.
2262 unsigned NumrBits =
2263 ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2264
2265 // Calculate the leading sign bit constraints by examining the
2266 // denominator. Given that the denominator is positive, there are two
2267 // cases:
2268 //
2269 // 1. the numerator is positive.
The result range is [0,C) and [0,C) u< 2270 // (1 << ceilLogBase2(C)). 2271 // 2272 // 2. the numerator is negative. Then the result range is (-C,0] and 2273 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). 2274 // 2275 // Thus a lower bound on the number of sign bits is `TyBits - 2276 // ceilLogBase2(C)`. 2277 2278 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); 2279 return std::max(NumrBits, ResBits); 2280 } 2281 break; 2282 } 2283 2284 case Instruction::AShr: { 2285 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2286 // ashr X, C -> adds C sign bits. Vectors too. 2287 const APInt *ShAmt; 2288 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2289 if (ShAmt->uge(TyBits)) 2290 break; // Bad shift. 2291 unsigned ShAmtLimited = ShAmt->getZExtValue(); 2292 Tmp += ShAmtLimited; 2293 if (Tmp > TyBits) Tmp = TyBits; 2294 } 2295 return Tmp; 2296 } 2297 case Instruction::Shl: { 2298 const APInt *ShAmt; 2299 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2300 // shl destroys sign bits. 2301 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2302 if (ShAmt->uge(TyBits) || // Bad shift. 2303 ShAmt->uge(Tmp)) break; // Shifted all sign bits out. 2304 Tmp2 = ShAmt->getZExtValue(); 2305 return Tmp - Tmp2; 2306 } 2307 break; 2308 } 2309 case Instruction::And: 2310 case Instruction::Or: 2311 case Instruction::Xor: // NOT is handled here. 2312 // Logical binary ops preserve the number of sign bits at the worst. 2313 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2314 if (Tmp != 1) { 2315 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2316 FirstAnswer = std::min(Tmp, Tmp2); 2317 // We computed what we know about the sign bits as our first 2318 // answer. Now proceed to the generic code that uses 2319 // computeKnownBits, and pick whichever answer is better. 2320 } 2321 break; 2322 2323 case Instruction::Select: 2324 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2325 if (Tmp == 1) return 1; // Early out. 2326 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 2327 return std::min(Tmp, Tmp2); 2328 2329 case Instruction::Add: 2330 // Add can have at most one carry bit. Thus we know that the output 2331 // is, at worst, one more bit than the inputs. 2332 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2333 if (Tmp == 1) return 1; // Early out. 2334 2335 // Special case decrementing a value (ADD X, -1): 2336 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 2337 if (CRHS->isAllOnesValue()) { 2338 KnownBits Known(TyBits); 2339 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 2340 2341 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2342 // sign bits set. 2343 if ((Known.Zero | 1).isAllOnesValue()) 2344 return TyBits; 2345 2346 // If we are subtracting one from a positive number, there is no carry 2347 // out of the result. 2348 if (Known.isNonNegative()) 2349 return Tmp; 2350 } 2351 2352 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2353 if (Tmp2 == 1) return 1; 2354 return std::min(Tmp, Tmp2)-1; 2355 2356 case Instruction::Sub: 2357 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2358 if (Tmp2 == 1) return 1; 2359 2360 // Handle NEG. 2361 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2362 if (CLHS->isNullValue()) { 2363 KnownBits Known(TyBits); 2364 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 2365 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2366 // sign bits set. 
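// (0 - 0 == 0 and 0 - 1 == -1 == 0b11...1, so in either case every bit of
// the result equals the sign bit.)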
2367 if ((Known.Zero | 1).isAllOnesValue()) 2368 return TyBits; 2369 2370 // If the input is known to be positive (the sign bit is known clear), 2371 // the output of the NEG has the same number of sign bits as the input. 2372 if (Known.isNonNegative()) 2373 return Tmp2; 2374 2375 // Otherwise, we treat this like a SUB. 2376 } 2377 2378 // Sub can have at most one carry bit. Thus we know that the output 2379 // is, at worst, one more bit than the inputs. 2380 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2381 if (Tmp == 1) return 1; // Early out. 2382 return std::min(Tmp, Tmp2)-1; 2383 2384 case Instruction::Mul: { 2385 // The output of the Mul can be at most twice the valid bits in the inputs. 2386 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2387 if (SignBitsOp0 == 1) return 1; // Early out. 2388 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2389 if (SignBitsOp1 == 1) return 1; 2390 unsigned OutValidBits = 2391 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); 2392 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; 2393 } 2394 2395 case Instruction::PHI: { 2396 const PHINode *PN = cast<PHINode>(U); 2397 unsigned NumIncomingValues = PN->getNumIncomingValues(); 2398 // Don't analyze large in-degree PHIs. 2399 if (NumIncomingValues > 4) break; 2400 // Unreachable blocks may have zero-operand PHI nodes. 2401 if (NumIncomingValues == 0) break; 2402 2403 // Take the minimum of all incoming values. This can't infinitely loop 2404 // because of our depth threshold. 2405 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q); 2406 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { 2407 if (Tmp == 1) return Tmp; 2408 Tmp = std::min( 2409 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q)); 2410 } 2411 return Tmp; 2412 } 2413 2414 case Instruction::Trunc: 2415 // FIXME: it's tricky to do anything useful for this, but it is an important 2416 // case for targets like X86. 2417 break; 2418 2419 case Instruction::ExtractElement: 2420 // Look through extract element. At the moment we keep this simple and skip 2421 // tracking the specific element. But at least we might find information 2422 // valid for all elements of the vector (for example if vector is sign 2423 // extended, shifted, etc). 2424 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2425 } 2426 2427 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2428 // use this information. 2429 2430 // If we can examine all elements of a vector constant successfully, we're 2431 // done (we can't do any better than that). If not, keep trying. 2432 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits)) 2433 return VecSignBits; 2434 2435 KnownBits Known(TyBits); 2436 computeKnownBits(V, Known, Depth, Q); 2437 2438 // If we know that the sign bit is either zero or one, determine the number of 2439 // identical bits in the top of the input value. 2440 return std::max(FirstAnswer, Known.countMinSignBits()); 2441 } 2442 2443 /// This function computes the integer multiple of Base that equals V. 2444 /// If successful, it returns true and returns the multiple in 2445 /// Multiple. If unsuccessful, it returns false. It looks 2446 /// through SExt instructions only if LookThroughSExt is true. 
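/// For example (an illustrative case): for V = 'mul i32 %x, 3' and Base = 3,
/// this returns true with Multiple set to %x, since V == 3 * %x.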
2447 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2448 bool LookThroughSExt, unsigned Depth) {
2449 const unsigned MaxDepth = 6;
2450
2451 assert(V && "No Value?");
2452 assert(Depth <= MaxDepth && "Limit Search Depth");
2453 assert(V->getType()->isIntegerTy() && "Not integer type!");
2454
2455 Type *T = V->getType();
2456
2457 ConstantInt *CI = dyn_cast<ConstantInt>(V);
2458
2459 if (Base == 0)
2460 return false;
2461
2462 if (Base == 1) {
2463 Multiple = V;
2464 return true;
2465 }
2466
2467 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2468 Constant *BaseVal = ConstantInt::get(T, Base);
2469 if (CO && CO == BaseVal) {
2470 // Multiple is 1.
2471 Multiple = ConstantInt::get(T, 1);
2472 return true;
2473 }
2474
2475 if (CI && CI->getZExtValue() % Base == 0) {
2476 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2477 return true;
2478 }
2479
2480 if (Depth == MaxDepth) return false; // Limit search depth.
2481
2482 Operator *I = dyn_cast<Operator>(V);
2483 if (!I) return false;
2484
2485 switch (I->getOpcode()) {
2486 default: break;
2487 case Instruction::SExt:
2488 if (!LookThroughSExt) return false;
2489 // otherwise fall through to ZExt
2490 LLVM_FALLTHROUGH;
2491 case Instruction::ZExt:
2492 return ComputeMultiple(I->getOperand(0), Base, Multiple,
2493 LookThroughSExt, Depth+1);
2494 case Instruction::Shl:
2495 case Instruction::Mul: {
2496 Value *Op0 = I->getOperand(0);
2497 Value *Op1 = I->getOperand(1);
2498
2499 if (I->getOpcode() == Instruction::Shl) {
2500 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2501 if (!Op1CI) return false;
2502 // Turn Op0 << Op1 into Op0 * 2^Op1
2503 APInt Op1Int = Op1CI->getValue();
2504 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2505 APInt API(Op1Int.getBitWidth(), 0);
2506 API.setBit(BitToSet);
2507 Op1 = ConstantInt::get(V->getContext(), API);
2508 }
2509
2510 Value *Mul0 = nullptr;
2511 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2512 if (Constant *Op1C = dyn_cast<Constant>(Op1))
2513 if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2514 if (Op1C->getType()->getPrimitiveSizeInBits() <
2515 MulC->getType()->getPrimitiveSizeInBits())
2516 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2517 if (Op1C->getType()->getPrimitiveSizeInBits() >
2518 MulC->getType()->getPrimitiveSizeInBits())
2519 MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2520
2521 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2522 Multiple = ConstantExpr::getMul(MulC, Op1C);
2523 return true;
2524 }
2525
2526 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2527 if (Mul0CI->getValue() == 1) {
2528 // V == Base * Op1, so return Op1
2529 Multiple = Op1;
2530 return true;
2531 }
2532 }
2533
2534 Value *Mul1 = nullptr;
2535 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2536 if (Constant *Op0C = dyn_cast<Constant>(Op0))
2537 if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2538 if (Op0C->getType()->getPrimitiveSizeInBits() <
2539 MulC->getType()->getPrimitiveSizeInBits())
2540 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2541 if (Op0C->getType()->getPrimitiveSizeInBits() >
2542 MulC->getType()->getPrimitiveSizeInBits())
2543 MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2544
2545 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2546 Multiple = ConstantExpr::getMul(MulC, Op0C);
2547 return true;
2548 }
2549
2550 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2551 if (Mul1CI->getValue() == 1) {
2552 // V == Base * Op0, so return Op0
2553 Multiple = Op0;
2554 return true;
2555 }
2556 }
2557 }
2558 }
2559
2560 // We could not determine if V is a multiple of Base.
2561 return false;
2562 }
2563
2564 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2565 const TargetLibraryInfo *TLI) {
2566 const Function *F = ICS.getCalledFunction();
2567 if (!F)
2568 return Intrinsic::not_intrinsic;
2569
2570 if (F->isIntrinsic())
2571 return F->getIntrinsicID();
2572
2573 if (!TLI)
2574 return Intrinsic::not_intrinsic;
2575
2576 LibFunc Func;
2577 // We're going to make assumptions about the functions' semantics, so check
2578 // that the target knows the function is available in this environment and
2579 // that it does not have local linkage.
2580 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2581 return Intrinsic::not_intrinsic;
2582
2583 if (!ICS.onlyReadsMemory())
2584 return Intrinsic::not_intrinsic;
2585
2586 // Otherwise check if we have a call to a function that can be turned into a
2587 // vector intrinsic.
2588 switch (Func) {
2589 default:
2590 break;
2591 case LibFunc_sin:
2592 case LibFunc_sinf:
2593 case LibFunc_sinl:
2594 return Intrinsic::sin;
2595 case LibFunc_cos:
2596 case LibFunc_cosf:
2597 case LibFunc_cosl:
2598 return Intrinsic::cos;
2599 case LibFunc_exp:
2600 case LibFunc_expf:
2601 case LibFunc_expl:
2602 return Intrinsic::exp;
2603 case LibFunc_exp2:
2604 case LibFunc_exp2f:
2605 case LibFunc_exp2l:
2606 return Intrinsic::exp2;
2607 case LibFunc_log:
2608 case LibFunc_logf:
2609 case LibFunc_logl:
2610 return Intrinsic::log;
2611 case LibFunc_log10:
2612 case LibFunc_log10f:
2613 case LibFunc_log10l:
2614 return Intrinsic::log10;
2615 case LibFunc_log2:
2616 case LibFunc_log2f:
2617 case LibFunc_log2l:
2618 return Intrinsic::log2;
2619 case LibFunc_fabs:
2620 case LibFunc_fabsf:
2621 case LibFunc_fabsl:
2622 return Intrinsic::fabs;
2623 case LibFunc_fmin:
2624 case LibFunc_fminf:
2625 case LibFunc_fminl:
2626 return Intrinsic::minnum;
2627 case LibFunc_fmax:
2628 case LibFunc_fmaxf:
2629 case LibFunc_fmaxl:
2630 return Intrinsic::maxnum;
2631 case LibFunc_copysign:
2632 case LibFunc_copysignf:
2633 case LibFunc_copysignl:
2634 return Intrinsic::copysign;
2635 case LibFunc_floor:
2636 case LibFunc_floorf:
2637 case LibFunc_floorl:
2638 return Intrinsic::floor;
2639 case LibFunc_ceil:
2640 case LibFunc_ceilf:
2641 case LibFunc_ceill:
2642 return Intrinsic::ceil;
2643 case LibFunc_trunc:
2644 case LibFunc_truncf:
2645 case LibFunc_truncl:
2646 return Intrinsic::trunc;
2647 case LibFunc_rint:
2648 case LibFunc_rintf:
2649 case LibFunc_rintl:
2650 return Intrinsic::rint;
2651 case LibFunc_nearbyint:
2652 case LibFunc_nearbyintf:
2653 case LibFunc_nearbyintl:
2654 return Intrinsic::nearbyint;
2655 case LibFunc_round:
2656 case LibFunc_roundf:
2657 case LibFunc_roundl:
2658 return Intrinsic::round;
2659 case LibFunc_pow:
2660 case LibFunc_powf:
2661 case LibFunc_powl:
2662 return Intrinsic::pow;
2663 case LibFunc_sqrt:
2664 case LibFunc_sqrtf:
2665 case LibFunc_sqrtl:
2666 return Intrinsic::sqrt;
2667 }
2668
2669 return Intrinsic::not_intrinsic;
2670 }
2671
2672 /// Return true if we can prove that the specified FP value is never equal to
2673 /// -0.0.
2674 ///
2675 /// NOTE: this function will need to be revisited when we support non-default
2676 /// rounding modes!
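/// For example (illustrative): 'fadd float %x, 0.0' can never produce -0.0,
/// since under the default rounding mode -0.0 + +0.0 == +0.0, and for any
/// other x, x + 0.0 == x, which is not -0.0.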
2677 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2678 unsigned Depth) {
2679 if (auto *CFP = dyn_cast<ConstantFP>(V))
2680 return !CFP->getValueAPF().isNegZero();
2681
2682 // Limit search depth.
2683 if (Depth == MaxDepth)
2684 return false;
2685
2686 auto *Op = dyn_cast<Operator>(V);
2687 if (!Op)
2688 return false;
2689
2690 // Check if the nsz fast-math flag is set.
2691 if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2692 if (FPO->hasNoSignedZeros())
2693 return true;
2694
2695 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2696 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2697 return true;
2698
2699 // sitofp and uitofp turn into +0.0 for zero.
2700 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2701 return true;
2702
2703 if (auto *Call = dyn_cast<CallInst>(Op)) {
2704 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2705 switch (IID) {
2706 default:
2707 break;
2708 // sqrt(-0.0) = -0.0, no other negative results are possible.
2709 case Intrinsic::sqrt:
2710 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2711 // fabs(x) != -0.0
2712 case Intrinsic::fabs:
2713 return true;
2714 }
2715 }
2716
2717 return false;
2718 }
2719
2720 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2721 /// standard ordered compare, i.e. treat -0.0 as ordered-less-than 0.0
2722 /// because of its sign bit, even though the two compare equal.
2723 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2724 const TargetLibraryInfo *TLI,
2725 bool SignBitOnly,
2726 unsigned Depth) {
2727 // TODO: This function does not do the right thing when SignBitOnly is true
2728 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2729 // which flips the sign bits of NaNs. See
2730 // https://llvm.org/bugs/show_bug.cgi?id=31702.
2731
2732 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2733 return !CFP->getValueAPF().isNegative() ||
2734 (!SignBitOnly && CFP->getValueAPF().isZero());
2735 }
2736
2737 // Handle vector of constants.
2738 if (auto *CV = dyn_cast<Constant>(V)) {
2739 if (CV->getType()->isVectorTy()) {
2740 unsigned NumElts = CV->getType()->getVectorNumElements();
2741 for (unsigned i = 0; i != NumElts; ++i) {
2742 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2743 if (!CFP)
2744 return false;
2745 if (CFP->getValueAPF().isNegative() &&
2746 (SignBitOnly || !CFP->getValueAPF().isZero()))
2747 return false;
2748 }
2749
2750 // All non-negative ConstantFPs.
2751 return true;
2752 }
2753 }
2754
2755 if (Depth == MaxDepth)
2756 return false; // Limit search depth.
2757
2758 const Operator *I = dyn_cast<Operator>(V);
2759 if (!I)
2760 return false;
2761
2762 switch (I->getOpcode()) {
2763 default:
2764 break;
2765 // Unsigned integers are always nonnegative.
2766 case Instruction::UIToFP:
2767 return true;
2768 case Instruction::FMul:
2769 // x*x is always non-negative or a NaN.
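// (Both operands of x*x have the same sign, so the product cannot be
// negative; it can still be NaN when x is NaN, which is why the
// SignBitOnly case additionally requires the nnan flag below.)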
2770 if (I->getOperand(0) == I->getOperand(1) && 2771 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 2772 return true; 2773 2774 LLVM_FALLTHROUGH; 2775 case Instruction::FAdd: 2776 case Instruction::FDiv: 2777 case Instruction::FRem: 2778 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2779 Depth + 1) && 2780 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2781 Depth + 1); 2782 case Instruction::Select: 2783 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2784 Depth + 1) && 2785 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2786 Depth + 1); 2787 case Instruction::FPExt: 2788 case Instruction::FPTrunc: 2789 // Widening/narrowing never change sign. 2790 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2791 Depth + 1); 2792 case Instruction::ExtractElement: 2793 // Look through extract element. At the moment we keep this simple and skip 2794 // tracking the specific element. But at least we might find information 2795 // valid for all elements of the vector. 2796 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2797 Depth + 1); 2798 case Instruction::Call: 2799 const auto *CI = cast<CallInst>(I); 2800 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI); 2801 switch (IID) { 2802 default: 2803 break; 2804 case Intrinsic::maxnum: 2805 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2806 Depth + 1) || 2807 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2808 Depth + 1); 2809 case Intrinsic::minnum: 2810 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2811 Depth + 1) && 2812 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2813 Depth + 1); 2814 case Intrinsic::exp: 2815 case Intrinsic::exp2: 2816 case Intrinsic::fabs: 2817 return true; 2818 2819 case Intrinsic::sqrt: 2820 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 2821 if (!SignBitOnly) 2822 return true; 2823 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 2824 CannotBeNegativeZero(CI->getOperand(0), TLI)); 2825 2826 case Intrinsic::powi: 2827 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 2828 // powi(x,n) is non-negative if n is even. 2829 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 2830 return true; 2831 } 2832 // TODO: This is not correct. Given that exp is an integer, here are the 2833 // ways that pow can return a negative value: 2834 // 2835 // pow(x, exp) --> negative if exp is odd and x is negative. 2836 // pow(-0, exp) --> -inf if exp is negative odd. 2837 // pow(-0, exp) --> -0 if exp is positive odd. 2838 // pow(-inf, exp) --> -0 if exp is negative odd. 2839 // pow(-inf, exp) --> -inf if exp is positive odd. 2840 // 2841 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 2842 // but we must return false if x == -0. Unfortunately we do not currently 2843 // have a way of expressing this constraint. See details in 2844 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2845 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2846 Depth + 1); 2847 2848 case Intrinsic::fma: 2849 case Intrinsic::fmuladd: 2850 // x*x+y is non-negative if y is non-negative. 
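// (x*x is non-negative apart from NaN, so adding a non-negative y cannot
// make the result ordered-less-than zero; the hasNoNaNs check below covers
// the SignBitOnly case.)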
2851 return I->getOperand(0) == I->getOperand(1) &&
2852 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2853 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2854 Depth + 1);
2855 }
2856 break;
2857 }
2858 return false;
2859 }
2860
2861 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2862 const TargetLibraryInfo *TLI) {
2863 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2864 }
2865
2866 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2867 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2868 }
2869
2870 bool llvm::isKnownNeverNaN(const Value *V) {
2871 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
2872
2873 // If we're told that NaNs won't happen, assume they won't.
2874 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
2875 if (FPMathOp->hasNoNaNs())
2876 return true;
2877
2878 // TODO: Handle instructions and potentially recurse like other 'isKnown'
2879 // functions. For example, the result of sitofp is never NaN.
2880
2881 // Handle scalar constants.
2882 if (auto *CFP = dyn_cast<ConstantFP>(V))
2883 return !CFP->isNaN();
2884
2885 // Bail out for constant expressions, but try to handle vector constants.
2886 if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2887 return false;
2888
2889 // For vectors, verify that each element is not NaN.
2890 unsigned NumElts = V->getType()->getVectorNumElements();
2891 for (unsigned i = 0; i != NumElts; ++i) {
2892 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2893 if (!Elt)
2894 return false;
2895 if (isa<UndefValue>(Elt))
2896 continue;
2897 auto *CElt = dyn_cast<ConstantFP>(Elt);
2898 if (!CElt || CElt->isNaN())
2899 return false;
2900 }
2901 // All elements were confirmed not-NaN or undefined.
2902 return true;
2903 }
2904
2905 /// If the specified value can be set by repeating the same byte in memory,
2906 /// return the i8 value that it is represented with. This is
2907 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2908 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2909 /// byte store (e.g. i16 0x1234), return null.
2910 Value *llvm::isBytewiseValue(Value *V) {
2911 // All byte-wide stores are splatable, even of arbitrary variables.
2912 if (V->getType()->isIntegerTy(8)) return V;
2913
2914 // Handle 'null' ConstantAggregateZero etc.
2915 if (Constant *C = dyn_cast<Constant>(V))
2916 if (C->isNullValue())
2917 return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2918
2919 // Constant float and double values can be handled as integer values if the
2920 // corresponding integer value is "byteable". An important case is 0.0.
2921 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2922 if (CFP->getType()->isFloatTy())
2923 V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2924 if (CFP->getType()->isDoubleTy())
2925 V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2926 // Don't handle long double formats, which have strange constraints.
2927 }
2928
2929 // We can handle constant integers that are a multiple of 8 bits wide.
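// E.g. (illustrative): i32 0xF0F0F0F0 is the byte 0xF0 repeated four times,
// so we return i8 0xF0 for it; i32 0x12345678 is not a splat of any single
// byte, so we return null.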
2930 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) { 2931 if (CI->getBitWidth() % 8 == 0) { 2932 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); 2933 2934 if (!CI->getValue().isSplat(8)) 2935 return nullptr; 2936 return ConstantInt::get(V->getContext(), CI->getValue().trunc(8)); 2937 } 2938 } 2939 2940 // A ConstantDataArray/Vector is splatable if all its members are equal and 2941 // also splatable. 2942 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) { 2943 Value *Elt = CA->getElementAsConstant(0); 2944 Value *Val = isBytewiseValue(Elt); 2945 if (!Val) 2946 return nullptr; 2947 2948 for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I) 2949 if (CA->getElementAsConstant(I) != Elt) 2950 return nullptr; 2951 2952 return Val; 2953 } 2954 2955 // Conceptually, we could handle things like: 2956 // %a = zext i8 %X to i16 2957 // %b = shl i16 %a, 8 2958 // %c = or i16 %a, %b 2959 // but until there is an example that actually needs this, it doesn't seem 2960 // worth worrying about. 2961 return nullptr; 2962 } 2963 2964 // This is the recursive version of BuildSubAggregate. It takes a few different 2965 // arguments. Idxs is the index within the nested struct From that we are 2966 // looking at now (which is of type IndexedType). IdxSkip is the number of 2967 // indices from Idxs that should be left out when inserting into the resulting 2968 // struct. To is the result struct built so far, new insertvalue instructions 2969 // build on that. 2970 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, 2971 SmallVectorImpl<unsigned> &Idxs, 2972 unsigned IdxSkip, 2973 Instruction *InsertBefore) { 2974 StructType *STy = dyn_cast<StructType>(IndexedType); 2975 if (STy) { 2976 // Save the original To argument so we can modify it 2977 Value *OrigTo = To; 2978 // General case, the type indexed by Idxs is a struct 2979 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2980 // Process each struct element recursively 2981 Idxs.push_back(i); 2982 Value *PrevTo = To; 2983 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, 2984 InsertBefore); 2985 Idxs.pop_back(); 2986 if (!To) { 2987 // Couldn't find any inserted value for this index? Cleanup 2988 while (PrevTo != OrigTo) { 2989 InsertValueInst* Del = cast<InsertValueInst>(PrevTo); 2990 PrevTo = Del->getAggregateOperand(); 2991 Del->eraseFromParent(); 2992 } 2993 // Stop processing elements 2994 break; 2995 } 2996 } 2997 // If we successfully found a value for each of our subaggregates 2998 if (To) 2999 return To; 3000 } 3001 // Base case, the type indexed by SourceIdxs is not a struct, or not all of 3002 // the struct's elements had a value that was inserted directly. In the latter 3003 // case, perhaps we can't determine each of the subelements individually, but 3004 // we might be able to find the complete struct somewhere. 3005 3006 // Find the value that is at that particular spot 3007 Value *V = FindInsertedValue(From, Idxs); 3008 3009 if (!V) 3010 return nullptr; 3011 3012 // Insert the value in the new (sub) aggregate 3013 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), 3014 "tmp", InsertBefore); 3015 } 3016 3017 // This helper takes a nested struct and extracts a part of it (which is again a 3018 // struct) into a new value. For example, given the struct: 3019 // { a, { b, { c, d }, e } } 3020 // and the indices "1, 1" this returns 3021 // { c, d }. 
3022 //
3023 // It does this by inserting an insertvalue for each element in the resulting
3024 // struct, as opposed to just inserting a single struct. This will only work if
3025 // each of the elements of the substruct is known (i.e., inserted into From by
3026 // an insertvalue instruction somewhere).
3027 //
3028 // All inserted insertvalue instructions are inserted before InsertBefore
3029 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3030 Instruction *InsertBefore) {
3031 assert(InsertBefore && "Must have someplace to insert!");
3032 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3033 idx_range);
3034 Value *To = UndefValue::get(IndexedType);
3035 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3036 unsigned IdxSkip = Idxs.size();
3037
3038 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3039 }
3040
3041 /// Given an aggregate and a sequence of indices, see if the scalar value
3042 /// indexed is already around as a register, for example if it was inserted
3043 /// directly into the aggregate.
3044 ///
3045 /// If InsertBefore is not null, this function will duplicate (modified)
3046 /// insertvalues when a part of a nested struct is extracted.
3047 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3048 Instruction *InsertBefore) {
3049 // Nothing to index? Just return V then (this is useful at the end of our
3050 // recursion).
3051 if (idx_range.empty())
3052 return V;
3053 // We have indices, so V should have an indexable type.
3054 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3055 "Not looking at a struct or array?");
3056 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3057 "Invalid indices for type?");
3058
3059 if (Constant *C = dyn_cast<Constant>(V)) {
3060 C = C->getAggregateElement(idx_range[0]);
3061 if (!C) return nullptr;
3062 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3063 }
3064
3065 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3066 // Walk the indices of the insertvalue instruction in parallel with the
3067 // requested indices.
3068 const unsigned *req_idx = idx_range.begin();
3069 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3070 i != e; ++i, ++req_idx) {
3071 if (req_idx == idx_range.end()) {
3072 // We can't handle this without inserting insertvalues
3073 if (!InsertBefore)
3074 return nullptr;
3075
3076 // The requested index identifies a part of a nested aggregate. Handle
3077 // this specially. For example,
3078 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3079 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3080 // %C = extractvalue {i32, { i32, i32 } } %B, 1
3081 // This can be changed into
3082 // %A = insertvalue {i32, i32 } undef, i32 10, 0
3083 // %C = insertvalue {i32, i32 } %A, i32 11, 1
3084 // which allows the unused 0,0 element from the nested struct to be
3085 // removed.
3086 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3087 InsertBefore);
3088 }
3089
3090 // This insertvalue inserts something other than what we are looking for.
3091 // See if the (aggregate) value inserted into has the value we are
3092 // looking for, then.
3093 if (*req_idx != *i)
3094 return FindInsertedValue(I->getAggregateOperand(), idx_range,
3095 InsertBefore);
3096 }
3097 // If we end up here, the indices of the insertvalue match with those
3098 // requested (though possibly only partially).
Now we recursively look at
3099 // the inserted value, passing any remaining indices.
3100 return FindInsertedValue(I->getInsertedValueOperand(),
3101 makeArrayRef(req_idx, idx_range.end()),
3102 InsertBefore);
3103 }
3104
3105 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3106 // If we're extracting a value from an aggregate that was extracted from
3107 // something else, we can extract from that something else directly instead.
3108 // However, we will need to chain I's indices with the requested indices.
3109
3110 // Calculate the number of indices required
3111 unsigned size = I->getNumIndices() + idx_range.size();
3112 // Allocate some space to put the new indices in
3113 SmallVector<unsigned, 5> Idxs;
3114 Idxs.reserve(size);
3115 // Add indices from the extract value instruction
3116 Idxs.append(I->idx_begin(), I->idx_end());
3117
3118 // Add requested indices
3119 Idxs.append(idx_range.begin(), idx_range.end());
3120
3121 assert(Idxs.size() == size
3122 && "Number of indices added not correct?");
3123
3124 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3125 }
3126 // Otherwise, we don't know (such as when extracting from a function return
3127 // value or a load instruction).
3128 return nullptr;
3129 }
3130
3131 /// Analyze the specified pointer to see if it can be expressed as a base
3132 /// pointer plus a constant offset. Return the base and offset to the caller.
3133 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3134 const DataLayout &DL) {
3135 unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3136 APInt ByteOffset(BitWidth, 0);
3137
3138 // We walk up the defs but use a visited set to handle unreachable code. In
3139 // that case, we stop after accumulating the cycle once (not that it
3140 // matters).
3141 SmallPtrSet<Value *, 16> Visited;
3142 while (Visited.insert(Ptr).second) {
3143 if (Ptr->getType()->isVectorTy())
3144 break;
3145
3146 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3147 // If one of the values we have visited is an addrspacecast, then
3148 // the pointer type of this GEP may be different from the type
3149 // of the Ptr parameter which was passed to this function. This
3150 // means when we construct GEPOffset, we need to use the size
3151 // of GEP's pointer type rather than the size of the original
3152 // pointer type.
3153 APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3154 if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3155 break;
3156
3157 ByteOffset += GEPOffset.getSExtValue();
3158
3159 Ptr = GEP->getPointerOperand();
3160 } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3161 Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3162 Ptr = cast<Operator>(Ptr)->getOperand(0);
3163 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3164 if (GA->isInterposable())
3165 break;
3166 Ptr = GA->getAliasee();
3167 } else {
3168 break;
3169 }
3170 }
3171 Offset = ByteOffset.getSExtValue();
3172 return Ptr;
3173 }
3174
3175 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3176 unsigned CharSize) {
3177 // Make sure the GEP has exactly three arguments.
3178 if (GEP->getNumOperands() != 3)
3179 return false;
3180
3181 // Make sure the index-ee is a pointer to an array of \p CharSize
3182 // integers.
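// A GEP of the expected shape looks like this (a sketch; @str is a
// hypothetical global):
//   getelementptr inbounds [13 x i8], [13 x i8]* @str, i64 0, i64 %idx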
3183 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType()); 3184 if (!AT || !AT->getElementType()->isIntegerTy(CharSize)) 3185 return false; 3186 3187 // Check to make sure that the first operand of the GEP is an integer and 3188 // has value 0 so that we are sure we're indexing into the initializer. 3189 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); 3190 if (!FirstIdx || !FirstIdx->isZero()) 3191 return false; 3192 3193 return true; 3194 } 3195 3196 bool llvm::getConstantDataArrayInfo(const Value *V, 3197 ConstantDataArraySlice &Slice, 3198 unsigned ElementSize, uint64_t Offset) { 3199 assert(V); 3200 3201 // Look through bitcast instructions and geps. 3202 V = V->stripPointerCasts(); 3203 3204 // If the value is a GEP instruction or constant expression, treat it as an 3205 // offset. 3206 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 3207 // The GEP operator should be based on a pointer to string constant, and is 3208 // indexing into the string constant. 3209 if (!isGEPBasedOnPointerToString(GEP, ElementSize)) 3210 return false; 3211 3212 // If the second index isn't a ConstantInt, then this is a variable index 3213 // into the array. If this occurs, we can't say anything meaningful about 3214 // the string. 3215 uint64_t StartIdx = 0; 3216 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) 3217 StartIdx = CI->getZExtValue(); 3218 else 3219 return false; 3220 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize, 3221 StartIdx + Offset); 3222 } 3223 3224 // The GEP instruction, constant or instruction, must reference a global 3225 // variable that is a constant and is initialized. The referenced constant 3226 // initializer is the array that we'll use for optimization. 3227 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); 3228 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) 3229 return false; 3230 3231 const ConstantDataArray *Array; 3232 ArrayType *ArrayTy; 3233 if (GV->getInitializer()->isNullValue()) { 3234 Type *GVTy = GV->getValueType(); 3235 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) { 3236 // A zeroinitializer for the array; there is no ConstantDataArray. 3237 Array = nullptr; 3238 } else { 3239 const DataLayout &DL = GV->getParent()->getDataLayout(); 3240 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy); 3241 uint64_t Length = SizeInBytes / (ElementSize / 8); 3242 if (Length <= Offset) 3243 return false; 3244 3245 Slice.Array = nullptr; 3246 Slice.Offset = 0; 3247 Slice.Length = Length - Offset; 3248 return true; 3249 } 3250 } else { 3251 // This must be a ConstantDataArray. 3252 Array = dyn_cast<ConstantDataArray>(GV->getInitializer()); 3253 if (!Array) 3254 return false; 3255 ArrayTy = Array->getType(); 3256 } 3257 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize)) 3258 return false; 3259 3260 uint64_t NumElts = ArrayTy->getArrayNumElements(); 3261 if (Offset > NumElts) 3262 return false; 3263 3264 Slice.Array = Array; 3265 Slice.Offset = Offset; 3266 Slice.Length = NumElts - Offset; 3267 return true; 3268 } 3269 3270 /// This function computes the length of a null-terminated C string pointed to 3271 /// by V. If successful, it returns true and returns the string in Str. 3272 /// If unsuccessful, it returns false. 
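/// For example (a sketch; @msg is a hypothetical global):
///   @msg = private constant [6 x i8] c"hello\00"
/// would yield Str == "hello" when TrimAtNul is set, since the first nul and
/// everything after it is dropped.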
3273 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 3274 uint64_t Offset, bool TrimAtNul) { 3275 ConstantDataArraySlice Slice; 3276 if (!getConstantDataArrayInfo(V, Slice, 8, Offset)) 3277 return false; 3278 3279 if (Slice.Array == nullptr) { 3280 if (TrimAtNul) { 3281 Str = StringRef(); 3282 return true; 3283 } 3284 if (Slice.Length == 1) { 3285 Str = StringRef("", 1); 3286 return true; 3287 } 3288 // We cannot instantiate a StringRef as we do not have an appropriate string 3289 // of 0s at hand. 3290 return false; 3291 } 3292 3293 // Start out with the entire array in the StringRef. 3294 Str = Slice.Array->getAsString(); 3295 // Skip over 'offset' bytes. 3296 Str = Str.substr(Slice.Offset); 3297 3298 if (TrimAtNul) { 3299 // Trim off the \0 and anything after it. If the array is not nul 3300 // terminated, we just return the whole end of string. The client may know 3301 // some other way that the string is length-bound. 3302 Str = Str.substr(0, Str.find('\0')); 3303 } 3304 return true; 3305 } 3306 3307 // These next two are very similar to the above, but also look through PHI 3308 // nodes. 3309 // TODO: See if we can integrate these two together. 3310 3311 /// If we can compute the length of the string pointed to by 3312 /// the specified pointer, return 'len+1'. If we can't, return 0. 3313 static uint64_t GetStringLengthH(const Value *V, 3314 SmallPtrSetImpl<const PHINode*> &PHIs, 3315 unsigned CharSize) { 3316 // Look through noop bitcast instructions. 3317 V = V->stripPointerCasts(); 3318 3319 // If this is a PHI node, there are two cases: either we have already seen it 3320 // or we haven't. 3321 if (const PHINode *PN = dyn_cast<PHINode>(V)) { 3322 if (!PHIs.insert(PN).second) 3323 return ~0ULL; // already in the set. 3324 3325 // If it was new, see if all the input strings are the same length. 3326 uint64_t LenSoFar = ~0ULL; 3327 for (Value *IncValue : PN->incoming_values()) { 3328 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize); 3329 if (Len == 0) return 0; // Unknown length -> unknown. 3330 3331 if (Len == ~0ULL) continue; 3332 3333 if (Len != LenSoFar && LenSoFar != ~0ULL) 3334 return 0; // Disagree -> unknown. 3335 LenSoFar = Len; 3336 } 3337 3338 // Success, all agree. 3339 return LenSoFar; 3340 } 3341 3342 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) 3343 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 3344 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize); 3345 if (Len1 == 0) return 0; 3346 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize); 3347 if (Len2 == 0) return 0; 3348 if (Len1 == ~0ULL) return Len2; 3349 if (Len2 == ~0ULL) return Len1; 3350 if (Len1 != Len2) return 0; 3351 return Len1; 3352 } 3353 3354 // Otherwise, see if we can read the string. 3355 ConstantDataArraySlice Slice; 3356 if (!getConstantDataArrayInfo(V, Slice, CharSize)) 3357 return 0; 3358 3359 if (Slice.Array == nullptr) 3360 return 1; 3361 3362 // Search for nul characters 3363 unsigned NullIndex = 0; 3364 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) { 3365 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0) 3366 break; 3367 } 3368 3369 return NullIndex + 1; 3370 } 3371 3372 /// If we can compute the length of the string pointed to by 3373 /// the specified pointer, return 'len+1'. If we can't, return 0. 
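/// For the hypothetical @msg example above this returns 6 (five characters
/// plus the terminating nul); callers must treat 0, not -1, as the failure
/// value.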
3374 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) { 3375 if (!V->getType()->isPointerTy()) return 0; 3376 3377 SmallPtrSet<const PHINode*, 32> PHIs; 3378 uint64_t Len = GetStringLengthH(V, PHIs, CharSize); 3379 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return 3380 // an empty string as a length. 3381 return Len == ~0ULL ? 1 : Len; 3382 } 3383 3384 /// \p PN defines a loop-variant pointer to an object. Check if the 3385 /// previous iteration of the loop was referring to the same object as \p PN. 3386 static bool isSameUnderlyingObjectInLoop(const PHINode *PN, 3387 const LoopInfo *LI) { 3388 // Find the loop-defined value. 3389 Loop *L = LI->getLoopFor(PN->getParent()); 3390 if (PN->getNumIncomingValues() != 2) 3391 return true; 3392 3393 // Find the value from previous iteration. 3394 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); 3395 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3396 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); 3397 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3398 return true; 3399 3400 // If a new pointer is loaded in the loop, the pointer references a different 3401 // object in every iteration. E.g.: 3402 // for (i) 3403 // int *p = a[i]; 3404 // ... 3405 if (auto *Load = dyn_cast<LoadInst>(PrevValue)) 3406 if (!L->isLoopInvariant(Load->getPointerOperand())) 3407 return false; 3408 return true; 3409 } 3410 3411 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL, 3412 unsigned MaxLookup) { 3413 if (!V->getType()->isPointerTy()) 3414 return V; 3415 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { 3416 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 3417 V = GEP->getPointerOperand(); 3418 } else if (Operator::getOpcode(V) == Instruction::BitCast || 3419 Operator::getOpcode(V) == Instruction::AddrSpaceCast) { 3420 V = cast<Operator>(V)->getOperand(0); 3421 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 3422 if (GA->isInterposable()) 3423 return V; 3424 V = GA->getAliasee(); 3425 } else if (isa<AllocaInst>(V)) { 3426 // An alloca can't be further simplified. 3427 return V; 3428 } else { 3429 if (auto CS = CallSite(V)) 3430 if (Value *RV = CS.getReturnedArgOperand()) { 3431 V = RV; 3432 continue; 3433 } 3434 3435 // See if InstructionSimplify knows any relevant tricks. 3436 if (Instruction *I = dyn_cast<Instruction>(V)) 3437 // TODO: Acquire a DominatorTree and AssumptionCache and use them. 3438 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) { 3439 V = Simplified; 3440 continue; 3441 } 3442 3443 return V; 3444 } 3445 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 3446 } 3447 return V; 3448 } 3449 3450 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects, 3451 const DataLayout &DL, LoopInfo *LI, 3452 unsigned MaxLookup) { 3453 SmallPtrSet<Value *, 4> Visited; 3454 SmallVector<Value *, 4> Worklist; 3455 Worklist.push_back(V); 3456 do { 3457 Value *P = Worklist.pop_back_val(); 3458 P = GetUnderlyingObject(P, DL, MaxLookup); 3459 3460 if (!Visited.insert(P).second) 3461 continue; 3462 3463 if (SelectInst *SI = dyn_cast<SelectInst>(P)) { 3464 Worklist.push_back(SI->getTrueValue()); 3465 Worklist.push_back(SI->getFalseValue()); 3466 continue; 3467 } 3468 3469 if (PHINode *PN = dyn_cast<PHINode>(P)) { 3470 // If this PHI changes the underlying object in every iteration of the 3471 // loop, don't look through it. 
Consider: 3472 // int **A; 3473 // for (i) { 3474 // Prev = Curr; // Prev = PHI (Prev_0, Curr) 3475 // Curr = A[i]; 3476 // *Prev, *Curr; 3477 // 3478 // Prev is tracking Curr one iteration behind so they refer to different 3479 // underlying objects. 3480 if (!LI || !LI->isLoopHeader(PN->getParent()) || 3481 isSameUnderlyingObjectInLoop(PN, LI)) 3482 for (Value *IncValue : PN->incoming_values()) 3483 Worklist.push_back(IncValue); 3484 continue; 3485 } 3486 3487 Objects.push_back(P); 3488 } while (!Worklist.empty()); 3489 } 3490 3491 /// This is the function that does the work of looking through basic 3492 /// ptrtoint+arithmetic+inttoptr sequences. 3493 static const Value *getUnderlyingObjectFromInt(const Value *V) { 3494 do { 3495 if (const Operator *U = dyn_cast<Operator>(V)) { 3496 // If we find a ptrtoint, we can transfer control back to the 3497 // regular getUnderlyingObjectFromInt. 3498 if (U->getOpcode() == Instruction::PtrToInt) 3499 return U->getOperand(0); 3500 // If we find an add of a constant, a multiplied value, or a phi, it's 3501 // likely that the other operand will lead us to the base 3502 // object. We don't have to worry about the case where the 3503 // object address is somehow being computed by the multiply, 3504 // because our callers only care when the result is an 3505 // identifiable object. 3506 if (U->getOpcode() != Instruction::Add || 3507 (!isa<ConstantInt>(U->getOperand(1)) && 3508 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && 3509 !isa<PHINode>(U->getOperand(1)))) 3510 return V; 3511 V = U->getOperand(0); 3512 } else { 3513 return V; 3514 } 3515 assert(V->getType()->isIntegerTy() && "Unexpected operand type!"); 3516 } while (true); 3517 } 3518 3519 /// This is a wrapper around GetUnderlyingObjects and adds support for basic 3520 /// ptrtoint+arithmetic+inttoptr sequences. 3521 /// It returns false if unidentified object is found in GetUnderlyingObjects. 3522 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V, 3523 SmallVectorImpl<Value *> &Objects, 3524 const DataLayout &DL) { 3525 SmallPtrSet<const Value *, 16> Visited; 3526 SmallVector<const Value *, 4> Working(1, V); 3527 do { 3528 V = Working.pop_back_val(); 3529 3530 SmallVector<Value *, 4> Objs; 3531 GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL); 3532 3533 for (Value *V : Objs) { 3534 if (!Visited.insert(V).second) 3535 continue; 3536 if (Operator::getOpcode(V) == Instruction::IntToPtr) { 3537 const Value *O = 3538 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0)); 3539 if (O->getType()->isPointerTy()) { 3540 Working.push_back(O); 3541 continue; 3542 } 3543 } 3544 // If GetUnderlyingObjects fails to find an identifiable object, 3545 // getUnderlyingObjectsForCodeGen also fails for safety. 3546 if (!isIdentifiedObject(V)) { 3547 Objects.clear(); 3548 return false; 3549 } 3550 Objects.push_back(const_cast<Value *>(V)); 3551 } 3552 } while (!Working.empty()); 3553 return true; 3554 } 3555 3556 /// Return true if the only users of this pointer are lifetime markers. 
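/// For example (a sketch), a pointer %p whose only uses are
///   call void @llvm.lifetime.start.p0i8(i64 4, i8* %p)
///   call void @llvm.lifetime.end.p0i8(i64 4, i8* %p)
/// satisfies this; any other user (a load, store, or ordinary call) does not.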
3557 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 3558 for (const User *U : V->users()) { 3559 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3560 if (!II) return false; 3561 3562 if (II->getIntrinsicID() != Intrinsic::lifetime_start && 3563 II->getIntrinsicID() != Intrinsic::lifetime_end) 3564 return false; 3565 } 3566 return true; 3567 } 3568 3569 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 3570 const Instruction *CtxI, 3571 const DominatorTree *DT) { 3572 const Operator *Inst = dyn_cast<Operator>(V); 3573 if (!Inst) 3574 return false; 3575 3576 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 3577 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 3578 if (C->canTrap()) 3579 return false; 3580 3581 switch (Inst->getOpcode()) { 3582 default: 3583 return true; 3584 case Instruction::UDiv: 3585 case Instruction::URem: { 3586 // x / y is undefined if y == 0. 3587 const APInt *V; 3588 if (match(Inst->getOperand(1), m_APInt(V))) 3589 return *V != 0; 3590 return false; 3591 } 3592 case Instruction::SDiv: 3593 case Instruction::SRem: { 3594 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 3595 const APInt *Numerator, *Denominator; 3596 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 3597 return false; 3598 // We cannot hoist this division if the denominator is 0. 3599 if (*Denominator == 0) 3600 return false; 3601 // It's safe to hoist if the denominator is not 0 or -1. 3602 if (*Denominator != -1) 3603 return true; 3604 // At this point we know that the denominator is -1. It is safe to hoist as 3605 // long we know that the numerator is not INT_MIN. 3606 if (match(Inst->getOperand(0), m_APInt(Numerator))) 3607 return !Numerator->isMinSignedValue(); 3608 // The numerator *might* be MinSignedValue. 3609 return false; 3610 } 3611 case Instruction::Load: { 3612 const LoadInst *LI = cast<LoadInst>(Inst); 3613 if (!LI->isUnordered() || 3614 // Speculative load may create a race that did not exist in the source. 3615 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) || 3616 // Speculative load may load data from dirty regions. 3617 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) || 3618 LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress)) 3619 return false; 3620 const DataLayout &DL = LI->getModule()->getDataLayout(); 3621 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(), 3622 LI->getAlignment(), DL, CtxI, DT); 3623 } 3624 case Instruction::Call: { 3625 auto *CI = cast<const CallInst>(Inst); 3626 const Function *Callee = CI->getCalledFunction(); 3627 3628 // The called function could have undefined behavior or side-effects, even 3629 // if marked readnone nounwind. 
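// Only an explicit `speculatable` attribute is trusted here. For instance
// (a sketch), a declaration such as
//   declare float @llvm.sqrt.f32(float) ; carries the speculatable attribute
// may be hoisted freely, whereas an arbitrary readnone nounwind call may not.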
3630 return Callee && Callee->isSpeculatable(); 3631 } 3632 case Instruction::VAArg: 3633 case Instruction::Alloca: 3634 case Instruction::Invoke: 3635 case Instruction::PHI: 3636 case Instruction::Store: 3637 case Instruction::Ret: 3638 case Instruction::Br: 3639 case Instruction::IndirectBr: 3640 case Instruction::Switch: 3641 case Instruction::Unreachable: 3642 case Instruction::Fence: 3643 case Instruction::AtomicRMW: 3644 case Instruction::AtomicCmpXchg: 3645 case Instruction::LandingPad: 3646 case Instruction::Resume: 3647 case Instruction::CatchSwitch: 3648 case Instruction::CatchPad: 3649 case Instruction::CatchRet: 3650 case Instruction::CleanupPad: 3651 case Instruction::CleanupRet: 3652 return false; // Misc instructions which have effects 3653 } 3654 } 3655 3656 bool llvm::mayBeMemoryDependent(const Instruction &I) { 3657 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 3658 } 3659 3660 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS, 3661 const Value *RHS, 3662 const DataLayout &DL, 3663 AssumptionCache *AC, 3664 const Instruction *CxtI, 3665 const DominatorTree *DT) { 3666 // Multiplying n * m significant bits yields a result of n + m significant 3667 // bits. If the total number of significant bits does not exceed the 3668 // result bit width (minus 1), there is no overflow. 3669 // This means if we have enough leading zero bits in the operands 3670 // we can guarantee that the result does not overflow. 3671 // Ref: "Hacker's Delight" by Henry Warren 3672 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3673 KnownBits LHSKnown(BitWidth); 3674 KnownBits RHSKnown(BitWidth); 3675 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT); 3676 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT); 3677 // Note that underestimating the number of zero bits gives a more 3678 // conservative answer. 3679 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() + 3680 RHSKnown.countMinLeadingZeros(); 3681 // First handle the easy case: if we have enough zero bits there's 3682 // definitely no overflow. 3683 if (ZeroBits >= BitWidth) 3684 return OverflowResult::NeverOverflows; 3685 3686 // Get the largest possible values for each operand. 3687 APInt LHSMax = ~LHSKnown.Zero; 3688 APInt RHSMax = ~RHSKnown.Zero; 3689 3690 // We know the multiply operation doesn't overflow if the maximum values for 3691 // each operand will not overflow after we multiply them together. 3692 bool MaxOverflow; 3693 (void)LHSMax.umul_ov(RHSMax, MaxOverflow); 3694 if (!MaxOverflow) 3695 return OverflowResult::NeverOverflows; 3696 3697 // We know it always overflows if multiplying the smallest possible values for 3698 // the operands also results in overflow. 3699 bool MinOverflow; 3700 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow); 3701 if (MinOverflow) 3702 return OverflowResult::AlwaysOverflows; 3703 3704 return OverflowResult::MayOverflow; 3705 } 3706 3707 OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS, 3708 const Value *RHS, 3709 const DataLayout &DL, 3710 AssumptionCache *AC, 3711 const Instruction *CxtI, 3712 const DominatorTree *DT) { 3713 // Multiplying n * m significant bits yields a result of n + m significant 3714 // bits. If the total number of significant bits does not exceed the 3715 // result bit width (minus 1), there is no overflow. 3716 // This means if we have enough leading sign bits in the operands 3717 // we can guarantee that the result does not overflow. 
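// Worked example: on i8, an operand with 5 sign bits lies in [-8, 7], so the
// product of two such operands lies in [-56, 64], comfortably inside
// [-128, 127]; SignBits = 10 exceeds BitWidth + 1 = 9, and we conclude
// NeverOverflows below.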
3718 // Ref: "Hacker's Delight" by Henry Warren 3719 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3720 3721 // Note that underestimating the number of sign bits gives a more 3722 // conservative answer. 3723 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) + 3724 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT); 3725 3726 // First handle the easy case: if we have enough sign bits there's 3727 // definitely no overflow. 3728 if (SignBits > BitWidth + 1) 3729 return OverflowResult::NeverOverflows; 3730 3731 // There are two ambiguous cases where there can be no overflow: 3732 // SignBits == BitWidth + 1 and 3733 // SignBits == BitWidth 3734 // The second case is difficult to check, therefore we only handle the 3735 // first case. 3736 if (SignBits == BitWidth + 1) { 3737 // It overflows only when both arguments are negative and the true 3738 // product is exactly the minimum negative number. 3739 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 3740 // For simplicity we just check if at least one side is not negative. 3741 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT); 3742 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); 3743 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) 3744 return OverflowResult::NeverOverflows; 3745 } 3746 return OverflowResult::MayOverflow; 3747 } 3748 3749 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS, 3750 const Value *RHS, 3751 const DataLayout &DL, 3752 AssumptionCache *AC, 3753 const Instruction *CxtI, 3754 const DominatorTree *DT) { 3755 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT); 3756 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) { 3757 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); 3758 3759 if (LHSKnown.isNegative() && RHSKnown.isNegative()) { 3760 // The sign bit is set in both cases: this MUST overflow. 3761 // Create a simple add instruction, and insert it into the struct. 3762 return OverflowResult::AlwaysOverflows; 3763 } 3764 3765 if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) { 3766 // The sign bit is clear in both cases: this CANNOT overflow. 3767 // Create a simple add instruction, and insert it into the struct. 3768 return OverflowResult::NeverOverflows; 3769 } 3770 } 3771 3772 return OverflowResult::MayOverflow; 3773 } 3774 3775 /// Return true if we can prove that adding the two values of the 3776 /// knownbits will not overflow. 3777 /// Otherwise return false. 3778 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown, 3779 const KnownBits &RHSKnown) { 3780 // Addition of two 2's complement numbers having opposite signs will never 3781 // overflow. 3782 if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) || 3783 (LHSKnown.isNonNegative() && RHSKnown.isNegative())) 3784 return true; 3785 3786 // If either of the values is known to be non-negative, adding them can only 3787 // overflow if the second is also non-negative, so we can assume that. 3788 // Two non-negative numbers will only overflow if there is a carry to the 3789 // sign bit, so we can check if even when the values are as big as possible 3790 // there is no overflow to the sign bit. 
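// E.g. on i8, if the known-zero bits bound the operands above by 0x3F and
// 0x40, then 0x3F + 0x40 = 0x7F still has a clear sign bit, so the addition
// can never ripple into the sign position and cannot overflow.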
3791 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3792 APInt MaxLHS = ~LHSKnown.Zero;
3793 MaxLHS.clearSignBit();
3794 APInt MaxRHS = ~RHSKnown.Zero;
3795 MaxRHS.clearSignBit();
3796 APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3797 return Result.isSignBitClear();
3798 }
3799
3800 // If either of the values is known to be negative, adding them can only
3801 // overflow if the second is also negative, so we can assume that.
3802 // Two negative numbers will only overflow if there is no carry into the
3803 // sign bit, so we can check whether, even when the values are as small as
3804 // possible, there is still a carry into the sign bit.
3805 if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3806 APInt MinLHS = LHSKnown.One;
3807 MinLHS.clearSignBit();
3808 APInt MinRHS = RHSKnown.One;
3809 MinRHS.clearSignBit();
3810 APInt Result = std::move(MinLHS) + std::move(MinRHS);
3811 return Result.isSignBitSet();
3812 }
3813
3814 // If we reached here it means that we know nothing about the sign bits.
3815 // In this case we can't know if there will be an overflow, since by
3816 // changing the sign bits any two values can be made to overflow.
3817 return false;
3818 }
3819
3820 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3821 const Value *RHS,
3822 const AddOperator *Add,
3823 const DataLayout &DL,
3824 AssumptionCache *AC,
3825 const Instruction *CxtI,
3826 const DominatorTree *DT) {
3827 if (Add && Add->hasNoSignedWrap()) {
3828 return OverflowResult::NeverOverflows;
3829 }
3830
3831 // If LHS and RHS each have at least two sign bits, the addition will look
3832 // like
3833 //
3834 // XX..... +
3835 // YY.....
3836 //
3837 // If the carry into the most significant position is 0, X and Y can't both
3838 // be 1 and therefore the carry out of the addition is also 0.
3839 //
3840 // If the carry into the most significant position is 1, X and Y can't both
3841 // be 0 and therefore the carry out of the addition is also 1.
3842 //
3843 // Since the carry into the most significant position is always equal to
3844 // the carry out of the addition, there is no signed overflow.
3845 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3846 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3847 return OverflowResult::NeverOverflows;
3848
3849 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3850 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3851
3852 if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3853 return OverflowResult::NeverOverflows;
3854
3855 // The remaining code needs Add to be available. Return early if it is not.
3856 if (!Add)
3857 return OverflowResult::MayOverflow;
3858
3859 // If the sign of Add is the same as at least one of the operands, this add
3860 // CANNOT overflow. This is particularly useful when the sum is
3861 // @llvm.assume'ed non-negative rather than proved so from analyzing its
3862 // operands.
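// Sketch of why this is sound when, say, LHS and the sum are both known
// non-negative: negative overflow would need both operands negative, and
// positive overflow would wrap to a negative bit pattern, contradicting the
// known sign of the sum.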
3863 bool LHSOrRHSKnownNonNegative = 3864 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()); 3865 bool LHSOrRHSKnownNegative = 3866 (LHSKnown.isNegative() || RHSKnown.isNegative()); 3867 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { 3868 KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT); 3869 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) || 3870 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) { 3871 return OverflowResult::NeverOverflows; 3872 } 3873 } 3874 3875 return OverflowResult::MayOverflow; 3876 } 3877 3878 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, 3879 const Value *RHS, 3880 const DataLayout &DL, 3881 AssumptionCache *AC, 3882 const Instruction *CxtI, 3883 const DominatorTree *DT) { 3884 // If the LHS is negative and the RHS is non-negative, no unsigned wrap. 3885 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT); 3886 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); 3887 if (LHSKnown.isNegative() && RHSKnown.isNonNegative()) 3888 return OverflowResult::NeverOverflows; 3889 3890 return OverflowResult::MayOverflow; 3891 } 3892 3893 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, 3894 const Value *RHS, 3895 const DataLayout &DL, 3896 AssumptionCache *AC, 3897 const Instruction *CxtI, 3898 const DominatorTree *DT) { 3899 // If LHS and RHS each have at least two sign bits, the subtraction 3900 // cannot overflow. 3901 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && 3902 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) 3903 return OverflowResult::NeverOverflows; 3904 3905 KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT); 3906 3907 KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT); 3908 3909 // Subtraction of two 2's complement numbers having identical signs will 3910 // never overflow. 3911 if ((LHSKnown.isNegative() && RHSKnown.isNegative()) || 3912 (LHSKnown.isNonNegative() && RHSKnown.isNonNegative())) 3913 return OverflowResult::NeverOverflows; 3914 3915 // TODO: implement logic similar to checkRippleForAdd 3916 return OverflowResult::MayOverflow; 3917 } 3918 3919 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II, 3920 const DominatorTree &DT) { 3921 #ifndef NDEBUG 3922 auto IID = II->getIntrinsicID(); 3923 assert((IID == Intrinsic::sadd_with_overflow || 3924 IID == Intrinsic::uadd_with_overflow || 3925 IID == Intrinsic::ssub_with_overflow || 3926 IID == Intrinsic::usub_with_overflow || 3927 IID == Intrinsic::smul_with_overflow || 3928 IID == Intrinsic::umul_with_overflow) && 3929 "Not an overflow intrinsic!"); 3930 #endif 3931 3932 SmallVector<const BranchInst *, 2> GuardingBranches; 3933 SmallVector<const ExtractValueInst *, 2> Results; 3934 3935 for (const User *U : II->users()) { 3936 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) { 3937 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type"); 3938 3939 if (EVI->getIndices()[0] == 0) 3940 Results.push_back(EVI); 3941 else { 3942 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type"); 3943 3944 for (const auto *U : EVI->users()) 3945 if (const auto *B = dyn_cast<BranchInst>(U)) { 3946 assert(B->isConditional() && "How else is it using an i1?"); 3947 GuardingBranches.push_back(B); 3948 } 3949 } 3950 } else { 3951 // We are using the aggregate directly in a way we don't want to analyze 3952 // here (storing it to a global, say). 
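// (For reference, the shape this function can analyze looks like:
//   %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %val  = extractvalue { i32, i1 } %pair, 0
//   %ovf  = extractvalue { i32, i1 } %pair, 1
//   br i1 %ovf, label %trap, label %cont
// with the no-wrap edge being the branch to %cont.)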
3953 return false;
3954 }
3955 }
3956
3957 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3958 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3959 if (!NoWrapEdge.isSingleEdge())
3960 return false;
3961
3962 // Check if all users of the add are provably no-wrap.
3963 for (const auto *Result : Results) {
3964 // If the extractvalue itself is not executed on overflow, then we don't
3965 // need to check each use separately, since domination is transitive.
3966 if (DT.dominates(NoWrapEdge, Result->getParent()))
3967 continue;
3968
3969 for (auto &RU : Result->uses())
3970 if (!DT.dominates(NoWrapEdge, RU))
3971 return false;
3972 }
3973
3974 return true;
3975 };
3976
3977 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
3978 }
3979
3980
3981 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3982 const DataLayout &DL,
3983 AssumptionCache *AC,
3984 const Instruction *CxtI,
3985 const DominatorTree *DT) {
3986 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3987 Add, DL, AC, CxtI, DT);
3988 }
3989
3990 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3991 const Value *RHS,
3992 const DataLayout &DL,
3993 AssumptionCache *AC,
3994 const Instruction *CxtI,
3995 const DominatorTree *DT) {
3996 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3997 }
3998
3999 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4000 // A memory operation returns normally if it isn't volatile. A volatile
4001 // operation is allowed to trap.
4002 //
4003 // An atomic operation isn't guaranteed to return in a reasonable amount of
4004 // time because it's possible for another thread to interfere with it for an
4005 // arbitrary length of time, but programs aren't allowed to rely on that.
4006 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4007 return !LI->isVolatile();
4008 if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4009 return !SI->isVolatile();
4010 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4011 return !CXI->isVolatile();
4012 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4013 return !RMWI->isVolatile();
4014 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4015 return !MII->isVolatile();
4016
4017 // If there is no successor, then execution can't transfer to it.
4018 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4019 return !CRI->unwindsToCaller();
4020 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4021 return !CatchSwitch->unwindsToCaller();
4022 if (isa<ResumeInst>(I))
4023 return false;
4024 if (isa<ReturnInst>(I))
4025 return false;
4026 if (isa<UnreachableInst>(I))
4027 return false;
4028
4029 // Calls can throw, or contain an infinite loop, or kill the process.
4030 if (auto CS = ImmutableCallSite(I)) {
4031 // Call sites that throw have implicit non-local control flow.
4032 if (!CS.doesNotThrow())
4033 return false;
4034
4035 // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4036 // etc. and thus not return. However, LLVM already assumes that
4037 //
4038 // - Thread exiting actions are modeled as writes to memory invisible to
4039 // the program.
4040 //
4041 // - Loops that don't have side effects (side effects are volatile/atomic
4042 // stores and IO) always terminate (see http://llvm.org/PR965).
4043 // Furthermore IO itself is also modeled as writes to memory invisible to
4044 // the program.
4045 //
4046 // We rely on those assumptions here, and use the memory effects of the call
4047 // target as a proxy for checking that it always returns.
4048
4049 // FIXME: This isn't aggressive enough; a call which only writes to a global
4050 // is guaranteed to return.
4051 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
4052 match(I, m_Intrinsic<Intrinsic::assume>()) ||
4053 match(I, m_Intrinsic<Intrinsic::sideeffect>());
4054 }
4055
4056 // Other instructions return normally.
4057 return true;
4058 }
4059
4060 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4061 // TODO: This is slightly conservative for invoke instructions since exiting
4062 // via an exception *is* normal control flow for them.
4063 for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4064 if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4065 return false;
4066 return true;
4067 }
4068
4069 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4070 const Loop *L) {
4071 // The loop header is guaranteed to be executed for every iteration.
4072 //
4073 // FIXME: Relax this constraint to cover all basic blocks that are
4074 // guaranteed to be executed at every iteration.
4075 if (I->getParent() != L->getHeader()) return false;
4076
4077 for (const Instruction &LI : *L->getHeader()) {
4078 if (&LI == I) return true;
4079 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4080 }
4081 llvm_unreachable("Instruction not contained in its own parent basic block.");
4082 }
4083
4084 bool llvm::propagatesFullPoison(const Instruction *I) {
4085 switch (I->getOpcode()) {
4086 case Instruction::Add:
4087 case Instruction::Sub:
4088 case Instruction::Xor:
4089 case Instruction::Trunc:
4090 case Instruction::BitCast:
4091 case Instruction::AddrSpaceCast:
4092 case Instruction::Mul:
4093 case Instruction::Shl:
4094 case Instruction::GetElementPtr:
4095 // These operations all propagate poison unconditionally. Note that poison
4096 // is not any particular value, so xor or subtraction of poison with
4097 // itself still yields poison, not zero.
4098 return true;
4099
4100 case Instruction::AShr:
4101 case Instruction::SExt:
4102 // For these operations, one bit of the input is replicated across
4103 // multiple output bits. A replicated poison bit is still poison.
4104 return true;
4105
4106 case Instruction::ICmp:
4107 // Comparing poison with any value yields poison. This is why, for
4108 // instance, x s< (x +nsw 1) can be folded to true.
4109 return true;
4110
4111 default:
4112 return false;
4113 }
4114 }
4115
4116 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4117 switch (I->getOpcode()) {
4118 case Instruction::Store:
4119 return cast<StoreInst>(I)->getPointerOperand();
4120
4121 case Instruction::Load:
4122 return cast<LoadInst>(I)->getPointerOperand();
4123
4124 case Instruction::AtomicCmpXchg:
4125 return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4126
4127 case Instruction::AtomicRMW:
4128 return cast<AtomicRMWInst>(I)->getPointerOperand();
4129
4130 case Instruction::UDiv:
4131 case Instruction::SDiv:
4132 case Instruction::URem:
4133 case Instruction::SRem:
4134 return I->getOperand(1);
4135
4136 default:
4137 return nullptr;
4138 }
4139 }
4140
4141 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4142 // We currently only look for uses of poison values within the same basic
4143 // block, as that makes it easier to guarantee that the uses will be
4144 // executed given that PoisonI is executed.
4145 //
4146 // FIXME: Expand this to consider uses beyond the same basic block. To do
4147 // this, look out for the distinction between post-dominance and strong
4148 // post-dominance.
4149 const BasicBlock *BB = PoisonI->getParent();
4150
4151 // Set of instructions that we have proved will yield poison if PoisonI
4152 // does.
4153 SmallSet<const Value *, 16> YieldsPoison;
4154 SmallSet<const BasicBlock *, 4> Visited;
4155 YieldsPoison.insert(PoisonI);
4156 Visited.insert(PoisonI->getParent());
4157
4158 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4159
4160 unsigned Iter = 0;
4161 while (Iter++ < MaxDepth) {
4162 for (auto &I : make_range(Begin, End)) {
4163 if (&I != PoisonI) {
4164 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4165 if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4166 return true;
4167 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4168 return false;
4169 }
4170
4171 // Mark poison that propagates from I through uses of I.
4172 if (YieldsPoison.count(&I)) {
4173 for (const User *User : I.users()) {
4174 const Instruction *UserI = cast<Instruction>(User);
4175 if (propagatesFullPoison(UserI))
4176 YieldsPoison.insert(User);
4177 }
4178 }
4179 }
4180
4181 if (auto *NextBB = BB->getSingleSuccessor()) {
4182 if (Visited.insert(NextBB).second) {
4183 BB = NextBB;
4184 Begin = BB->getFirstNonPHI()->getIterator();
4185 End = BB->end();
4186 continue;
4187 }
4188 }
4189
4190 break;
4191 }
4192 return false;
4193 }
4194
4195 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4196 if (FMF.noNaNs())
4197 return true;
4198
4199 if (auto *C = dyn_cast<ConstantFP>(V))
4200 return !C->isNaN();
4201 return false;
4202 }
4203
4204 static bool isKnownNonZero(const Value *V) {
4205 if (auto *C = dyn_cast<ConstantFP>(V))
4206 return !C->isZero();
4207 return false;
4208 }
4209
4210 /// Match clamp pattern for float types, without caring about NaNs or signed
4211 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
4212 /// function recognizes whether it can be substituted by a "canonical" min/max
4213 /// pattern.
4214 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4215 Value *CmpLHS, Value *CmpRHS,
4216 Value *TrueVal, Value *FalseVal,
4217 Value *&LHS, Value *&RHS) {
4218 // Try to match
4219 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4220 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4221 // and return a description of the outer Max/Min.
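// E.g. (a sketch, names invented):
//   %cmp = fcmp olt float %x, 1.0
//   %min = ... ; a min(%x, 2.0), matched via m_OrdFMin/m_UnordFMin below
//   %r = select i1 %cmp, float 1.0, float %min ; == max(1.0, min(%x, 2.0))
// Here C1 = 1.0 < C2 = 2.0, so the OLT case below reports SPF_FMAXNUM.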
4222 4223 // First, check if select has inverse order: 4224 if (CmpRHS == FalseVal) { 4225 std::swap(TrueVal, FalseVal); 4226 Pred = CmpInst::getInversePredicate(Pred); 4227 } 4228 4229 // Assume success now. If there's no match, callers should not use these anyway. 4230 LHS = TrueVal; 4231 RHS = FalseVal; 4232 4233 const APFloat *FC1; 4234 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite()) 4235 return {SPF_UNKNOWN, SPNB_NA, false}; 4236 4237 const APFloat *FC2; 4238 switch (Pred) { 4239 case CmpInst::FCMP_OLT: 4240 case CmpInst::FCMP_OLE: 4241 case CmpInst::FCMP_ULT: 4242 case CmpInst::FCMP_ULE: 4243 if (match(FalseVal, 4244 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)), 4245 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) && 4246 FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan) 4247 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false}; 4248 break; 4249 case CmpInst::FCMP_OGT: 4250 case CmpInst::FCMP_OGE: 4251 case CmpInst::FCMP_UGT: 4252 case CmpInst::FCMP_UGE: 4253 if (match(FalseVal, 4254 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)), 4255 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) && 4256 FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan) 4257 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false}; 4258 break; 4259 default: 4260 break; 4261 } 4262 4263 return {SPF_UNKNOWN, SPNB_NA, false}; 4264 } 4265 4266 /// Recognize variations of: 4267 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v))) 4268 static SelectPatternResult matchClamp(CmpInst::Predicate Pred, 4269 Value *CmpLHS, Value *CmpRHS, 4270 Value *TrueVal, Value *FalseVal) { 4271 // Swap the select operands and predicate to match the patterns below. 4272 if (CmpRHS != TrueVal) { 4273 Pred = ICmpInst::getSwappedPredicate(Pred); 4274 std::swap(TrueVal, FalseVal); 4275 } 4276 const APInt *C1; 4277 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 4278 const APInt *C2; 4279 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 4280 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 4281 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 4282 return {SPF_SMAX, SPNB_NA, false}; 4283 4284 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 4285 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 4286 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 4287 return {SPF_SMIN, SPNB_NA, false}; 4288 4289 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 4290 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 4291 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 4292 return {SPF_UMAX, SPNB_NA, false}; 4293 4294 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 4295 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 4296 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 4297 return {SPF_UMIN, SPNB_NA, false}; 4298 } 4299 return {SPF_UNKNOWN, SPNB_NA, false}; 4300 } 4301 4302 /// Recognize variations of: 4303 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) 4304 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, 4305 Value *CmpLHS, Value *CmpRHS, 4306 Value *TVal, Value *FVal, 4307 unsigned Depth) { 4308 // TODO: Allow FP min/max with nnan/nsz. 
4309 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); 4310 4311 Value *A, *B; 4312 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); 4313 if (!SelectPatternResult::isMinOrMax(L.Flavor)) 4314 return {SPF_UNKNOWN, SPNB_NA, false}; 4315 4316 Value *C, *D; 4317 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); 4318 if (L.Flavor != R.Flavor) 4319 return {SPF_UNKNOWN, SPNB_NA, false}; 4320 4321 // We have something like: x Pred y ? min(a, b) : min(c, d). 4322 // Try to match the compare to the min/max operations of the select operands. 4323 // First, make sure we have the right compare predicate. 4324 switch (L.Flavor) { 4325 case SPF_SMIN: 4326 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { 4327 Pred = ICmpInst::getSwappedPredicate(Pred); 4328 std::swap(CmpLHS, CmpRHS); 4329 } 4330 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 4331 break; 4332 return {SPF_UNKNOWN, SPNB_NA, false}; 4333 case SPF_SMAX: 4334 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { 4335 Pred = ICmpInst::getSwappedPredicate(Pred); 4336 std::swap(CmpLHS, CmpRHS); 4337 } 4338 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 4339 break; 4340 return {SPF_UNKNOWN, SPNB_NA, false}; 4341 case SPF_UMIN: 4342 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { 4343 Pred = ICmpInst::getSwappedPredicate(Pred); 4344 std::swap(CmpLHS, CmpRHS); 4345 } 4346 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 4347 break; 4348 return {SPF_UNKNOWN, SPNB_NA, false}; 4349 case SPF_UMAX: 4350 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { 4351 Pred = ICmpInst::getSwappedPredicate(Pred); 4352 std::swap(CmpLHS, CmpRHS); 4353 } 4354 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) 4355 break; 4356 return {SPF_UNKNOWN, SPNB_NA, false}; 4357 default: 4358 return {SPF_UNKNOWN, SPNB_NA, false}; 4359 } 4360 4361 // If there is a common operand in the already matched min/max and the other 4362 // min/max operands match the compare operands (either directly or inverted), 4363 // then this is min/max of the same flavor. 4364 4365 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4366 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4367 if (D == B) { 4368 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4369 match(A, m_Not(m_Specific(CmpRHS))))) 4370 return {L.Flavor, SPNB_NA, false}; 4371 } 4372 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4373 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4374 if (C == B) { 4375 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 4376 match(A, m_Not(m_Specific(CmpRHS))))) 4377 return {L.Flavor, SPNB_NA, false}; 4378 } 4379 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4380 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4381 if (D == A) { 4382 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4383 match(B, m_Not(m_Specific(CmpRHS))))) 4384 return {L.Flavor, SPNB_NA, false}; 4385 } 4386 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 4387 // ~d pred ~b ? 
m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 4388 if (C == A) { 4389 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 4390 match(B, m_Not(m_Specific(CmpRHS))))) 4391 return {L.Flavor, SPNB_NA, false}; 4392 } 4393 4394 return {SPF_UNKNOWN, SPNB_NA, false}; 4395 } 4396 4397 /// Match non-obvious integer minimum and maximum sequences. 4398 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, 4399 Value *CmpLHS, Value *CmpRHS, 4400 Value *TrueVal, Value *FalseVal, 4401 Value *&LHS, Value *&RHS, 4402 unsigned Depth) { 4403 // Assume success. If there's no match, callers should not use these anyway. 4404 LHS = TrueVal; 4405 RHS = FalseVal; 4406 4407 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal); 4408 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 4409 return SPR; 4410 4411 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth); 4412 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 4413 return SPR; 4414 4415 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) 4416 return {SPF_UNKNOWN, SPNB_NA, false}; 4417 4418 // Z = X -nsw Y 4419 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0) 4420 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0) 4421 if (match(TrueVal, m_Zero()) && 4422 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 4423 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 4424 4425 // Z = X -nsw Y 4426 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0) 4427 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0) 4428 if (match(FalseVal, m_Zero()) && 4429 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 4430 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 4431 4432 const APInt *C1; 4433 if (!match(CmpRHS, m_APInt(C1))) 4434 return {SPF_UNKNOWN, SPNB_NA, false}; 4435 4436 // An unsigned min/max can be written with a signed compare. 4437 const APInt *C2; 4438 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || 4439 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { 4440 // Is the sign bit set? 4441 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX 4442 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN 4443 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() && 4444 C2->isMaxSignedValue()) 4445 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4446 4447 // Is the sign bit clear? 4448 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX 4449 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN 4450 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() && 4451 C2->isMinSignedValue()) 4452 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4453 } 4454 4455 // Look through 'not' ops to find disguised signed min/max. 4456 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C) 4457 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C) 4458 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) && 4459 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2) 4460 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 4461 4462 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X) 4463 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X) 4464 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) && 4465 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2) 4466 return {Pred == CmpInst::ICMP_SGT ? 
SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 4467 4468 return {SPF_UNKNOWN, SPNB_NA, false}; 4469 } 4470 4471 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, 4472 FastMathFlags FMF, 4473 Value *CmpLHS, Value *CmpRHS, 4474 Value *TrueVal, Value *FalseVal, 4475 Value *&LHS, Value *&RHS, 4476 unsigned Depth) { 4477 LHS = CmpLHS; 4478 RHS = CmpRHS; 4479 4480 // Signed zero may return inconsistent results between implementations. 4481 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0 4482 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) 4483 // Therefore, we behave conservatively and only proceed if at least one of the 4484 // operands is known to not be zero or if we don't care about signed zero. 4485 switch (Pred) { 4486 default: break; 4487 // FIXME: Include OGT/OLT/UGT/ULT. 4488 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE: 4489 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE: 4490 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) && 4491 !isKnownNonZero(CmpRHS)) 4492 return {SPF_UNKNOWN, SPNB_NA, false}; 4493 } 4494 4495 SelectPatternNaNBehavior NaNBehavior = SPNB_NA; 4496 bool Ordered = false; 4497 4498 // When given one NaN and one non-NaN input: 4499 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. 4500 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the 4501 // ordered comparison fails), which could be NaN or non-NaN. 4502 // so here we discover exactly what NaN behavior is required/accepted. 4503 if (CmpInst::isFPPredicate(Pred)) { 4504 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); 4505 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); 4506 4507 if (LHSSafe && RHSSafe) { 4508 // Both operands are known non-NaN. 4509 NaNBehavior = SPNB_RETURNS_ANY; 4510 } else if (CmpInst::isOrdered(Pred)) { 4511 // An ordered comparison will return false when given a NaN, so it 4512 // returns the RHS. 4513 Ordered = true; 4514 if (LHSSafe) 4515 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 4516 NaNBehavior = SPNB_RETURNS_NAN; 4517 else if (RHSSafe) 4518 NaNBehavior = SPNB_RETURNS_OTHER; 4519 else 4520 // Completely unsafe. 4521 return {SPF_UNKNOWN, SPNB_NA, false}; 4522 } else { 4523 Ordered = false; 4524 // An unordered comparison will return true when given a NaN, so it 4525 // returns the LHS. 4526 if (LHSSafe) 4527 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 4528 NaNBehavior = SPNB_RETURNS_OTHER; 4529 else if (RHSSafe) 4530 NaNBehavior = SPNB_RETURNS_NAN; 4531 else 4532 // Completely unsafe. 4533 return {SPF_UNKNOWN, SPNB_NA, false}; 4534 } 4535 } 4536 4537 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 4538 std::swap(CmpLHS, CmpRHS); 4539 Pred = CmpInst::getSwappedPredicate(Pred); 4540 if (NaNBehavior == SPNB_RETURNS_NAN) 4541 NaNBehavior = SPNB_RETURNS_OTHER; 4542 else if (NaNBehavior == SPNB_RETURNS_OTHER) 4543 NaNBehavior = SPNB_RETURNS_NAN; 4544 Ordered = !Ordered; 4545 } 4546 4547 // ([if]cmp X, Y) ? X : Y 4548 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 4549 switch (Pred) { 4550 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  const APInt *C1;
  if (match(CmpRHS, m_APInt(C1))) {
    if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
        (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {

      // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
      // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
      if (Pred == ICmpInst::ICMP_SGT &&
          (C1->isNullValue() || C1->isAllOnesValue())) {
        return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }

      // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
      // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
      if (Pred == ICmpInst::ICMP_SLT &&
          (C1->isNullValue() || C1->isOneValue())) {
        return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }
    }
  }

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS,
                       Depth);

  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such cases.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}

/// Helps to match a select pattern in case of a type mismatch.
///
/// The function handles the case where the types of the true and false values
/// of a select instruction differ from the type of the cmp instruction's
/// operands because of a cast instruction. The function checks whether it is
/// legal to move the cast operation after the select. If so, it returns the
/// new second value of the select (with the assumption that the cast is
/// moved):
/// 1. As the operand of the cast instruction when both values of the select
///    are the same cast operation.
/// 2. As a restored constant (by applying the reverse cast operation) when
///    the first value of the select is a cast operation and the second value
///    is a constant.
/// NOTE: We return only the new second value because the first value can be
/// accessed as the operand of the cast instruction.
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
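    // For example (illustrative IR, not taken from this file):
    //
    //   %t = sext i8 %a to i32
    //   %f = sext i8 %b to i32
    //   %s = select i1 %cond, i32 %t, i32 %f
    //
    // Returning V2's cast operand (%b here) lets the caller redo the pattern
    // match on the narrower i8 values.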
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move trunc after select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // upper bits after truncation. It can't be an abs pattern, because that
      // would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only a min/max pattern can be matched. Such a match requires the
      // widened C to equal CmpConst. That is why we set the widened C to
      // CmpConst; the condition trunc(CmpConst) == C is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
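  // If the compare operates on a different type than the select's operands,
  // try to look through a cast on one arm (recovering a matching constant on
  // the other) so that the pattern match can run on the narrower values.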
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}

CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}

CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
}

/// Return true if "icmp Pred LHS RHS" is always true.
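/// For example, "icmp ule %x, (add nuw %x, C)" is always true for any
/// constant C, and "icmp sle %x, (add nsw %x, C)" is always true whenever C
/// is non-negative.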
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is true
/// when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
/// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false. Otherwise, return None.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    const Value *ALHS,
                                                    const Value *ARHS,
                                                    CmpInst::Predicate BPred,
                                                    const Value *BLHS,
                                                    const Value *BRHS,
                                                    bool IsSwappedOps) {
  // Canonicalize the operands so they're matching.
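  // Swapping a compare's operands together with its predicate yields an
  // equivalent compare, e.g. (icmp sgt %y, %x) is (icmp slt %x, %y), so after
  // this step B's operands line up with A's.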
  if (IsSwappedOps) {
    std::swap(BLHS, BRHS);
    BPred = ICmpInst::getSwappedPredicate(BPred);
  }

  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
/// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false. Otherwise, return None.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const Value *BLHS, const ConstantInt *C2) {
  assert(ALHS == BLHS && "LHS operands must match.");
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);
  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
  ICmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  Value *BLHS = RHS->getOperand(0);
  Value *BRHS = RHS->getOperand(1);
  ICmpInst::Predicate BPred = RHS->getPredicate();

  // Can we infer anything when the two compares have matching operands?
  bool IsSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // exit early.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
            cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // exit early.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None. We expect the RHS to be an icmp and the LHS
/// to be an 'and' or an 'or' instruction.
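/// For example, if LHS is (and (icmp sgt %x, 10), %other) and LHSIsTrue, then
/// both legs of the 'and' must be true, so an RHS of (icmp sgt %x, 5) is
/// implied true.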
static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication =
            isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication =
            isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (LHSCmp && RHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
  // an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO && RHSCmp) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
  }
  return None;
}