//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}
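// For illustration only (a sketch of the intent, not normative): with a
// typical DataLayout,
//   getBitWidth(i32, DL)       == 32
//   getBitWidth(<4 x i16>, DL) == 16   // element width, not vector width
//   getBitWidth(i8*, DL)       == DL.getIndexTypeSizeInBits(i8*)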
namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace
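// A sketch of the recursion Excluded guards against (illustrative IR only):
//   %c = icmp eq i32 %x, %y
//   call void @llvm.assume(i1 %c)
// computeKnownBits(%x) consults the assume to learn bits of %x from %y,
// which recursively queries computeKnownBits(%y) and would consult the same
// assume to learn bits of %y from %x; excluding the visited assume on the
// recursive Query breaks the cycle.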
// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}
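// Illustrative instance of computeForAddSub's carry reasoning (a sketch,
// not the full algorithm): adding
//   a = ???????0   (low bit known zero)
//   b = ???????0   (low bit known zero)
// the low bits add as 0 + 0 with no incoming carry, so a + b ends in 0 as
// well; longer known runs similarly bound how far a carry can propagate.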
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                                Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //      XX11 (3)
  //      X111 (7)
  //   -------
  //      XX11
  //     XX11
  //    XX11
  //   XX11
  //   -------
  //   XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                                   return EphValues.count(U);
                                 })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}
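// Worked example of the common-prefix logic above (illustrative values):
// for !range !{i32 64, i32 72}, the values lie in [64, 72), so
// umax ^ umin == 71 ^ 64 == 0b111 and the top 29 bits form a common
// prefix. Every such value looks like 0b1000xxx: bit 6 is known one,
// bits 3-5 and 7-31 are known zero, and the low 3 bits stay unknown.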
// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
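// Illustration of rule 1 above (a sketch): in
//   entry:
//     call void @llvm.assume(i1 %cond)
//     %v = load i32, i32* %p
// the assume is valid for %v's context because it comes first in the same
// block. With the order reversed, the scan from %v to the assume must see
// only speculatable or assume-like instructions, since anything that might
// not transfer control (e.g. a call that can unwind or not return) could
// keep execution from ever reaching the assume.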
static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
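      // Worked instance of the shl patterns above (illustrative values):
      // from assume((%v << 8) == 0x1200) on an i16 %v, the fully-known RHS
      // 0x1200 shifted right by 8 pins %v's low byte to 0x12, while %v's
      // top byte stays unknown because those bits were shifted out.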
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }
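  // Sketch of the conflict handled below (illustrative): if both
  // assume(%x == 0) and assume(%x == 1) reach this query, bit 0 of %x
  // becomes known-zero and known-one simultaneously, so Known.Zero
  // intersects Known.One and we must fall back to "nothing known".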
  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use two fresh temporaries for this
  // calculation; we reuse the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;
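  // Sketch of the candidate filtering in the loop below (illustrative): if
  // the i32 shift amount has Known.One = 0b100 and Known.Zero = 0b010, only
  // amounts with bit 2 set and bit 1 clear survive (4, 5, 12, 13, 20, 21,
  // 28, 29), and the shifted known bits are intersected across exactly
  // those candidates.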
  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS is zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 bits are known to be clear if zero in either the LHS
    // or the RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 bits are known to be set if set in either the LHS
    // or the RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
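    // Small worked case for the formulas below (illustrative): with
    // LHS = ??01 and RHS = ??10, LHS ^ RHS = ??11; wherever both inputs
    // are known, equal known bits produce a known-0 and a known-0 against
    // a known-1 produces a known-1.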
    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 bits are known to be set if set in exactly one of the
    // LHS and RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    } else if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        MaxHighZeros = 1;
    }
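    // Example for the SPF_UMIN bound above (illustrative i16 values): if one
    // operand is known 0x00?? (8 leading zeros) and the other 0x0???
    // (4 leading zeros), then umin is <= the first operand < 0x0100, so
    // the result inherits the stronger bound of 8 leading zero bits.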
    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
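  // Sketch of the NSW refinement in the Shl case above (illustrative):
  // for %r = shl nsw i8 %x, 2 with %x known non-negative (sign bit in
  // KnownZero), the result is either poison (on signed overflow) or keeps
  // a zero sign bit, so KZF may mark the sign bit known zero as well.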
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }
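    // Example for the power-of-two case above (illustrative): %r = urem
    // i32 %x, 8 keeps only the low 3 bits of %x, so Known(%r) copies %x's
    // low 3 known bits and marks the remaining 29 bits known zero.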
    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle the case when the index is a vector zeroinitializer.
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
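  // Example of the GEP trailing-zero accounting above (illustrative): for
  //   %p = getelementptr i32, i32* %base, i64 %i
  // with %base 16-byte aligned (4 trailing zero bits) and an element size
  // of 4 bytes (2 trailing zeros), the result keeps
  // min(4, 2 + tz(%i)) >= 2 known trailing zero bits.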
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known trailing zero bits
          // across both operands.
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
            // If the initial value of the recurrence is non-negative and we
            // are adding a non-negative number with nsw, the result can only
            // be non-negative or a poison value, regardless of the number of
            // times we execute the add in the recurrence. If the initial
            // value is negative and we are adding a negative number with nsw,
            // the result can only be negative or poison. Similar arguments
            // apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value references this PHI itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
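  // Sketch of the conservative PHI merge above (illustrative): for
  //   %p = phi i32 [ 4, %a ], [ 12, %b ]
  // the incoming values are 0b0100 and 0b1100; intersecting leaves bit 2
  // known one, bits 0-1 and everything above bit 3 known zero, and bit 3
  // unknown.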
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::fshr:
      case Intrinsic::fshl: {
        const APInt *SA;
        if (!match(I->getOperand(2), m_APInt(SA)))
          break;

        // Normalize to funnel shift left.
        uint64_t ShiftAmt = SA->urem(BitWidth);
        if (II->getIntrinsicID() == Intrinsic::fshr)
          ShiftAmt = BitWidth - ShiftAmt;

        KnownBits Known3(Known);
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);

        Known.Zero =
            Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
        Known.One =
            Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      }
    }
    break;
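  // Worked instance of the ctpop bound above (illustrative): if at most
  // 10 bits of the operand can be one, the count is <= 10, which fits in
  // Log2_32(10) + 1 = 4 bits, so bits 4 and up of the result are known
  // zero.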
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              Known, Known2, Depth, Q);
          break;
        }
      }
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
          V->getType()->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");

  Type *ScalarTy = V->getType()->getScalarType();
  unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
    Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
  assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
  (void)BitWidth;
  (void)ExpectedWidth;

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known.One = *C;
    Known.Zero = ~Known.One;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
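  // For illustration (a made-up constant): for <2 x i8> <i8 15, i8 3> the
  // elements are 00001111 and 00000011, so the intersection leaves
  // Known.One = 00000011 and Known.Zero = 11110000.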
  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      APInt Elt = CDS->getElementAsAPInt(i);
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        Known.resetAll();
        return;
      }
      const APInt &Elt = ElementCI->getValue();
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  // Start out not knowing anything.
  Known.resetAll();

  // We can't imply anything about undefs.
  if (isa<UndefValue>(V))
    return;

  // There's no point in looking through other users of ConstantData for
  // assumptions. Confirm that we've handled them all.
  assert(!isa<ConstantData>(V) && "Unhandled constant data!");

  // Limit search depth.
  // All recursive calls that increase depth must come after this.
  if (Depth == MaxDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable())
      computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
    return;
  }

  if (const Operator *I = dyn_cast<Operator>(V))
    computeKnownBitsFromOperator(I, Known, Depth, Q);

  // Aligned pointers have trailing zeros - refine Known.Zero set
  if (V->getType()->isPointerTy()) {
    unsigned Align = V->getPointerAlignment(Q.DL);
    if (Align)
      Known.Zero.setLowBits(countTrailingZeros(Align));
  }

  // computeKnownBitsFromAssume strictly refines Known.
  // Therefore, we run it after computeKnownBitsFromOperator.

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, Known, Depth, Q);

  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
}

/// Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                            const Query &Q) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  // Attempt to match against constants.
  if (OrZero && match(V, m_Power2OrZero()))
    return true;
  if (match(V, m_Power2()))
    return true;

  // 1 << X is clearly a power of two if the one is not shifted off the end. If
  // it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signmask) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
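  // For illustration (i8, made-up values): 0x80 lshr 3 == 0x10, still a single
  // set bit; 0x80 lshr 7 == 1; a shift of 8 or more would be undefined.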
  if (match(V, m_LShr(m_SignMask(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  Value *X = nullptr, *Y = nullptr;
  // A shift left or a logical shift right of a power of two is a power of two
  // or zero.
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_LShr(m_Value(X), m_Value()))))
    return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);

  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);

  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
    return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
           isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
      return true;
    // X & (-X) is always a power of two or zero.
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // Adding a power-of-two or zero to the same power-of-two or zero yields
  // either the original power-of-two, a larger power-of-two or zero.
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
    if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
        Q.IIQ.hasNoSignedWrap(VOBO)) {
      if (match(X, m_And(m_Specific(Y), m_Value())) ||
          match(X, m_And(m_Value(), m_Specific(Y))))
        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
          return true;
      if (match(Y, m_And(m_Specific(X), m_Value())) ||
          match(Y, m_And(m_Value(), m_Specific(X))))
        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
          return true;

      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      KnownBits LHSBits(BitWidth);
      computeKnownBits(X, LHSBits, Depth, Q);

      KnownBits RHSBits(BitWidth);
      computeKnownBits(Y, RHSBits, Depth, Q);
      // If i8 V is a power of two or zero:
      //  ZeroBits: 1 1 1 0 1 1 1 1
      // ~ZeroBits: 0 0 0 1 0 0 0 0
      if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
        // If OrZero isn't set, we cannot give back a zero result.
        // Make sure either the LHS or RHS has a bit set.
        if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
          return true;
    }
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
                                  Depth, Q);
  }

  return false;
}

/// Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
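///
/// For illustration (hypothetical IR): 'getelementptr inbounds i32, i32* %p,
/// i64 1' in address space 0 is known non-null either when %p is known
/// non-null or because the constant index adds a non-zero offset.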
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
                              const Query &Q) {
  const Function *F = nullptr;
  if (const Instruction *I = dyn_cast<Instruction>(GEP))
    F = I->getFunction();

  if (!GEP->isInBounds() ||
      NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
    return true;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = Q.DL.getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't want
    // to recurse 10k times just because we have 10k GEP operands. We don't
    // bail completely out because we want to handle constant GEPs regardless
    // of depth.
    if (Depth++ >= MaxDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
      return true;
  }

  return false;
}

static bool isKnownNonNullFromDominatingCondition(const Value *V,
                                                  const Instruction *CtxI,
                                                  const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() && "V must be pointer type");
  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");

  if (!CtxI || !DT)
    return false;

  unsigned NumUsesExplored = 0;
  for (auto *U : V->users()) {
    // Avoid massive lists.
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;

    // If the value is used as an argument to a call or invoke, then argument
    // attributes may provide an answer about null-ness.
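    // For illustration (hypothetical IR): a dominating call such as
    // 'call void @f(i8* nonnull %p)' lets us conclude that %p is non-null at
    // the context instruction.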
    if (auto CS = ImmutableCallSite(U))
      if (auto *CalledFunc = CS.getCalledFunction())
        for (const Argument &Arg : CalledFunc->args())
          if (CS.getArgOperand(Arg.getArgNo()) == V &&
              Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
            return true;

    // Consider only compare instructions uniquely controlling a branch.
    CmpInst::Predicate Pred;
    if (!match(const_cast<User *>(U),
               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
      continue;

    SmallVector<const User *, 4> WorkList;
    SmallPtrSet<const User *, 4> Visited;
    for (auto *CmpU : U->users()) {
      assert(WorkList.empty() && "Should be!");
      if (Visited.insert(CmpU).second)
        WorkList.push_back(CmpU);

      while (!WorkList.empty()) {
        auto *Curr = WorkList.pop_back_val();

        // If a user is an AND, add all its users to the work list. We only
        // propagate "pred != null" condition through AND because it is only
        // correct to assume that all conditions of AND are met in true branch.
        // TODO: Support similar logic of OR and EQ predicate?
        if (Pred == ICmpInst::ICMP_NE)
          if (auto *BO = dyn_cast<BinaryOperator>(Curr))
            if (BO->getOpcode() == Instruction::And) {
              for (auto *BOU : BO->users())
                if (Visited.insert(BOU).second)
                  WorkList.push_back(BOU);
              continue;
            }

        if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
          assert(BI->isConditional() && "uses a comparison!");

          BasicBlock *NonNullSuccessor =
              BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
          BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
          if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
            return true;
        } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
                   DT->dominates(cast<Instruction>(Curr), CtxI)) {
          return true;
        }
      }
    }
  }

  return false;
}

/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value? 'RangeType' is
/// the type of the value described by the range.
static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;

    // For constant vectors, check that all elements are undefined or known
    // non-zero to determine that the whole vector is known non-zero.
    if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
        Constant *Elt = C->getAggregateElement(i);
        if (!Elt || Elt->isNullValue())
          return false;
        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
          return false;
      }
      return true;
    }

    // A global variable in address space 0 is non-null unless it is extern
    // weak or an absolute symbol reference. Other address spaces may have null
    // as a valid address for a global, so we can't assume anything.
    if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
      if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
          GV->getType()->getAddressSpace() == 0)
        return true;
    } else
      return false;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
      // If the possible ranges don't contain zero, then the value is
      // definitely non-zero.
      if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
        const APInt ZeroValue(Ty->getBitWidth(), 0);
        if (rangeMetadataExcludesValue(Ranges, ZeroValue))
          return true;
      }
    }
  }

  // Some of the tests below are recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  // Check for pointer simplifications.
  if (V->getType()->isPointerTy()) {
    // Alloca never returns null, malloc might.
    if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
      return true;

    // A byval, inalloca, or nonnull argument is never null.
    if (const Argument *A = dyn_cast<Argument>(V))
      if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
        return true;

    // A Load tagged with nonnull metadata is never null.
    if (const LoadInst *LI = dyn_cast<LoadInst>(V))
      if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
        return true;

    if (const auto *Call = dyn_cast<CallBase>(V)) {
      if (Call->isReturnNonNull())
        return true;
      if (const auto *RP = getArgumentAliasingToReturnedPointer(Call))
        return isKnownNonZero(RP, Depth, Q);
    }
  }

  // Check for recursive pointer simplifications.
  if (V->getType()->isPointerTy()) {
    if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
      return true;

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (isGEPKnownNonNull(GEP, Depth, Q))
        return true;
  }

  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = nullptr, *Y = nullptr;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);

  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
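    // For illustration (made-up values): 'shl nuw i8 %x, 3' is non-zero
    // whenever %x is, since nuw guarantees no set bit is shifted out.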
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (Q.IIQ.hasNoUnsignedWrap(BO))
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known(BitWidth);
    computeKnownBits(X, Known, Depth, Q);
    if (Known.One[0])
      return true;
  }
  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known = computeKnownBits(X, Depth, Q);
    if (Known.isNegative())
      return true;

    // If the shifter operand is a constant, and all of the bits shifted
    // out are known to be zero, and X is known non-zero then at least one
    // non-zero bit must remain.
    if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
      auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
      // Is there a known one in the portion not shifted out?
      if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
        return true;
      // Are all the bits to be shifted out known zero?
      if (Known.countMinTrailingZeros() >= ShiftVal)
        return isKnownNonZero(X, Depth, Q);
    }
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, Depth, Q);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    KnownBits XKnown = computeKnownBits(X, Depth, Q);
    KnownBits YKnown = computeKnownBits(Y, Depth, Q);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnown.isNonNegative() && YKnown.isNonNegative())
      if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (XKnown.isNegative() && YKnown.isNegative()) {
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set. If some other bit is set then X is not equal
      // to INT_MIN.
      if (XKnown.One.intersects(Mask))
        return true;
      // The sign bit of Y is set. If some other bit is set then Y is not equal
      // to INT_MIN.
      if (YKnown.One.intersects(Mask))
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
      return true;
    if (YKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
        isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
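  // For illustration (hypothetical IR): 'select i1 %c, i8 1, i8 2' is non-zero
  // whichever arm is taken.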
  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
        isKnownNonZero(SI->getFalseValue(), Depth, Q))
      return true;
  }
  // PHI
  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    // Try to detect a recurrence that monotonically increases from a
    // starting value, as these are common as induction variables.
    if (PN->getNumIncomingValues() == 2) {
      Value *Start = PN->getIncomingValue(0);
      Value *Induction = PN->getIncomingValue(1);
      if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
        std::swap(Start, Induction);
      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
        if (!C->isZero() && !C->isNegative()) {
          ConstantInt *X;
          if (Q.IIQ.UseInstrInfo &&
              (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
               match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
              !X->isNegative())
            return true;
        }
      }
    }
    // Check if all incoming values are non-zero constants.
    bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
      return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
    });
    if (AllNonZeroConstants)
      return true;
  }

  KnownBits Known(BitWidth);
  computeKnownBits(V, Known, Depth, Q);
  return Known.One != 0;
}

/// Return true if V2 == V1 + X, where X is known non-zero.
static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
  if (!BO || BO->getOpcode() != Instruction::Add)
    return false;
  Value *Op = nullptr;
  if (V2 == BO->getOperand(0))
    Op = BO->getOperand(1);
  else if (V2 == BO->getOperand(1))
    Op = BO->getOperand(0);
  else
    return false;
  return isKnownNonZero(Op, 0, Q);
}

/// Return true if it is known that V1 != V2.
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
  if (V1 == V2)
    return false;
  if (V1->getType() != V2->getType())
    // We can't look through casts yet.
    return false;
  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
    return true;

  if (V1->getType()->isIntOrIntVectorTy()) {
    // Are any known bits in V1 contradictory to known bits in V2? If V1
    // has a known zero where V2 has a known one, they must not be equal.
    KnownBits Known1 = computeKnownBits(V1, 0, Q);
    KnownBits Known2 = computeKnownBits(V2, 0, Q);

    if (Known1.Zero.intersects(Known2.One) ||
        Known2.Zero.intersects(Known1.One))
      return true;
  }
  return false;
}

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and a bit is set only if it is true for all of the elements in the vector.
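///
/// For illustration (made-up values): with Mask == 0b11, this returns true for
/// any V known to be a multiple of 4, e.g. a value produced by 'shl ..., 2'.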
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                       const Query &Q) {
  KnownBits Known(Mask.getBitWidth());
  computeKnownBits(V, Known, Depth, Q);
  return Mask.isSubsetOf(Known.Zero);
}

// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
// Returns the input and lower/upper bounds.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
                                const APInt *&CLow, const APInt *&CHigh) {
  assert(isa<Operator>(Select) &&
         cast<Operator>(Select)->getOpcode() == Instruction::Select &&
         "Input should be a Select!");

  const Value *LHS, *RHS, *LHS2, *RHS2;
  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
    return false;

  if (!match(RHS, m_APInt(CLow)))
    return false;

  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
  if (getInverseMinMaxFlavor(SPF) != SPF2)
    return false;

  if (!match(RHS2, m_APInt(CHigh)))
    return false;

  if (SPF == SPF_SMIN)
    std::swap(CLow, CHigh);

  In = LHS2;
  return CLow->sle(*CHigh);
}

/// For vector constants, loop over the elements and find the constant with the
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
static unsigned computeNumSignBitsVectorConstant(const Value *V,
                                                 unsigned TyBits) {
  const auto *CV = dyn_cast<Constant>(V);
  if (!CV || !CV->getType()->isVectorTy())
    return 0;

  unsigned MinSignBits = TyBits;
  unsigned NumElts = CV->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    // If we find a non-ConstantInt, bail out.
    auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
    if (!Elt)
      return 0;

    MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
  }

  return MinSignBits;
}

static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
  assert(Result > 0 && "At least one sign bit needs to be present!");
  return Result;
}

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign bit
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits.
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  // We return the minimum number of sign bits that are guaranteed to be
  // present in V, so for undef we have to conservatively return 1. We don't
  // have the same behavior for poison though -- that's a FIXME today.

  Type *ScalarTy = V->getType()->getScalarType();
  unsigned TyBits = ScalarTy->isPointerTy() ?
    Q.DL.getIndexTypeSizeInBits(ScalarTy) :
    Q.DL.getTypeSizeInBits(ScalarTy);

  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general computeKnownBits case
  // below.

  if (Depth == MaxDepth)
    return 1;  // Limit search depth.

  const Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;

  case Instruction::SDiv: {
    const APInt *Denominator;
    // sdiv X, C -> adds log(C) sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits.
      unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Add floor(log(C)) bits to the numerator bits.
      return std::min(TyBits, NumBits + Denominator->logBase2());
    }
    break;
  }

  case Instruction::SRem: {
    const APInt *Denominator;
    // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant. This lets us put a lower bound on the number of sign
    // bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {

      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits. SRem by a positive constant
      // can't lower the number of sign bits.
      unsigned NumrBits =
          ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Calculate the leading sign bit constraints by examining the
      // denominator. Given that the denominator is positive, there are two
      // cases:
      //
      //  1. the numerator is positive. The result range is [0,C) and [0,C) u<
      //     (1 << ceilLogBase2(C)).
      //
      //  2. the numerator is negative. Then the result range is (-C,0] and
      //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
      //
      // Thus a lower bound on the number of sign bits is `TyBits -
      // ceilLogBase2(C)`.

      unsigned ResBits = TyBits - Denominator->ceilLogBase2();
      return std::max(NumrBits, ResBits);
    }
    break;
  }

  case Instruction::AShr: {
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    // ashr X, C -> adds C sign bits. Vectors too.
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      if (ShAmt->uge(TyBits))
        break;  // Bad shift.
      unsigned ShAmtLimited = ShAmt->getZExtValue();
      Tmp += ShAmtLimited;
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  }
  case Instruction::Shl: {
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      if (ShAmt->uge(TyBits) ||   // Bad shift.
          ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
      Tmp2 = ShAmt->getZExtValue();
      return Tmp - Tmp2;
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:    // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
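    // For illustration (made-up counts): if the i16 operands have 9 and 5
    // known sign bits, the result of and/or/xor still has at least 5.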
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select: {
    // If we have a clamp pattern, we know that the number of sign bits will be
    // the minimum of the clamp min/max range.
    const Value *X;
    const APInt *CLow, *CHigh;
    if (isSignedMinMaxClamp(U, X, CLow, CHigh))
      return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());

    Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp == 1) break;
    Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
    return std::min(Tmp, Tmp2);
  }

  case Instruction::Add:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) break;

    // Special case decrementing a value (ADD X, -1):
    if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (Known.isNonNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) break;
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) break;

    // Handle NEG.
    if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (Known.isNonNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) break;
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Mul: {
    // The output of the Mul can be at most twice the valid bits in the inputs.
    unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (SignBitsOp0 == 1) break;
    unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (SignBitsOp1 == 1) break;
    unsigned OutValidBits =
        (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
    return OutValidBits > TyBits ?
        1 : TyBits - OutValidBits + 1;
  }

  case Instruction::PHI: {
    const PHINode *PN = cast<PHINode>(U);
    unsigned NumIncomingValues = PN->getNumIncomingValues();
    // Don't analyze large in-degree PHIs.
    if (NumIncomingValues > 4) break;
    // Unreachable blocks may have zero-operand PHI nodes.
    if (NumIncomingValues == 0) break;

    // Take the minimum of all incoming values. This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
    for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(
          Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;

  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

  case Instruction::ShuffleVector: {
    // TODO: This is copied almost directly from the SelectionDAG version of
    //       ComputeNumSignBits. It would be better if we could share common
    //       code. If not, make sure that changes are translated to the DAG.

    // Collect the minimum number of sign bits that are shared by every vector
    // element referenced by the shuffle.
    auto *Shuf = cast<ShuffleVectorInst>(U);
    int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
    int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
    APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
    for (int i = 0; i != NumMaskElts; ++i) {
      int M = Shuf->getMaskValue(i);
      assert(M < NumElts * 2 && "Invalid shuffle mask constant");
      // For undef elements, we don't know anything about the common state of
      // the shuffle result.
      if (M == -1)
        return 1;
      if (M < NumElts)
        DemandedLHS.setBit(M % NumElts);
      else
        DemandedRHS.setBit(M % NumElts);
    }
    Tmp = std::numeric_limits<unsigned>::max();
    if (!!DemandedLHS)
      Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
    if (!!DemandedRHS) {
      Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
      Tmp = std::min(Tmp, Tmp2);
    }
    // If we don't know anything, early out and try computeKnownBits fall-back.
    if (Tmp == 1)
      break;
    assert(Tmp <= V->getType()->getScalarSizeInBits() &&
           "Failed to determine minimum sign bits");
    return Tmp;
  }
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.

  // If we can examine all elements of a vector constant successfully, we're
  // done (we can't do any better than that). If not, keep trying.
  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
    return VecSignBits;

  KnownBits Known(TyBits);
  computeKnownBits(V, Known, Depth, Q);

  // If we know that the sign bit is either zero or one, determine the number
  // of identical bits in the top of the input value.
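  // For illustration (made-up bits): for an i8 with Known.Zero = 11100000 the
  // value fits in the low five bits, so countMinSignBits() reports at least 3.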
  return std::max(FirstAnswer, Known.countMinSignBits());
}

/// This function computes the integer multiple of Base that equals V.
/// If successful, it returns true and returns the multiple in
/// Multiple. If unsuccessful, it returns false. It looks
/// through SExt instructions only if LookThroughSExt is true.
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    // Multiple is 1.
    Multiple = ConstantInt::get(T, 1);
    return true;
  }

  if (CI && CI->getZExtValue() % Base == 0) {
    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
    return true;
  }

  if (Depth == MaxDepth) return false;  // Limit search depth.

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // otherwise fall through to ZExt
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth+1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }

    Value *Mul0 = nullptr;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = nullptr;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0
          Multiple = Op0;
          return true;
        }
    }
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}

Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
                                            const TargetLibraryInfo *TLI) {
  const Function *F = ICS.getCalledFunction();
  if (!F)
    return Intrinsic::not_intrinsic;

  if (F->isIntrinsic())
    return F->getIntrinsicID();

  if (!TLI)
    return Intrinsic::not_intrinsic;

  LibFunc Func;
  // We're going to make assumptions about the semantics of the functions, so
  // check that the target knows the function is available in this environment
  // and that it does not have local linkage.
  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
    return Intrinsic::not_intrinsic;

  if (!ICS.onlyReadsMemory())
    return Intrinsic::not_intrinsic;

  // Otherwise check if we have a call to a function that can be turned into a
  // vector intrinsic.
  switch (Func) {
  default:
    break;
  case LibFunc_sin:
  case LibFunc_sinf:
  case LibFunc_sinl:
    return Intrinsic::sin;
  case LibFunc_cos:
  case LibFunc_cosf:
  case LibFunc_cosl:
    return Intrinsic::cos;
  case LibFunc_exp:
  case LibFunc_expf:
  case LibFunc_expl:
    return Intrinsic::exp;
  case LibFunc_exp2:
  case LibFunc_exp2f:
  case LibFunc_exp2l:
    return Intrinsic::exp2;
  case LibFunc_log:
  case LibFunc_logf:
  case LibFunc_logl:
    return Intrinsic::log;
  case LibFunc_log10:
  case LibFunc_log10f:
  case LibFunc_log10l:
    return Intrinsic::log10;
  case LibFunc_log2:
  case LibFunc_log2f:
  case LibFunc_log2l:
    return Intrinsic::log2;
  case LibFunc_fabs:
  case LibFunc_fabsf:
  case LibFunc_fabsl:
    return Intrinsic::fabs;
  case LibFunc_fmin:
  case LibFunc_fminf:
  case LibFunc_fminl:
    return Intrinsic::minnum;
  case LibFunc_fmax:
  case LibFunc_fmaxf:
  case LibFunc_fmaxl:
    return Intrinsic::maxnum;
  case LibFunc_copysign:
  case LibFunc_copysignf:
  case LibFunc_copysignl:
    return Intrinsic::copysign;
  case LibFunc_floor:
  case LibFunc_floorf:
  case LibFunc_floorl:
    return Intrinsic::floor;
  case LibFunc_ceil:
  case LibFunc_ceilf:
  case LibFunc_ceill:
    return Intrinsic::ceil;
  case LibFunc_trunc:
  case LibFunc_truncf:
  case LibFunc_truncl:
    return Intrinsic::trunc;
  case LibFunc_rint:
  case LibFunc_rintf:
  case LibFunc_rintl:
    return Intrinsic::rint;
  case LibFunc_nearbyint:
  case LibFunc_nearbyintf:
  case LibFunc_nearbyintl:
    return Intrinsic::nearbyint;
  case LibFunc_round:
  case LibFunc_roundf:
  case LibFunc_roundl:
    return Intrinsic::round;
  case LibFunc_pow:
  case LibFunc_powf:
  case LibFunc_powl:
    return Intrinsic::pow;
  case LibFunc_sqrt:
  case LibFunc_sqrtf:
  case LibFunc_sqrtl:
    return Intrinsic::sqrt;
  }

  return Intrinsic::not_intrinsic;
}

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                                unsigned Depth) {
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  // Limit search depth.
  if (Depth == MaxDepth)
    return false;

  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return false;

  // Check if the nsz fast-math flag is set.
  if (auto *FPO = dyn_cast<FPMathOperator>(Op))
    if (FPO->hasNoSignedZeros())
      return true;

  // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
    return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
    return true;

  if (auto *Call = dyn_cast<CallInst>(Op)) {
    Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
    switch (IID) {
    default:
      break;
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    case Intrinsic::sqrt:
    case Intrinsic::canonicalize:
      return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
    // fabs(x) != -0.0
    case Intrinsic::fabs:
      return true;
    }
  }

  return false;
}

/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
/// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the
/// sign bit despite comparing equal.
static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
                                            const TargetLibraryInfo *TLI,
                                            bool SignBitOnly,
                                            unsigned Depth) {
  // TODO: This function does not do the right thing when SignBitOnly is true
  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
  // which flips the sign bits of NaNs. See
  // https://llvm.org/bugs/show_bug.cgi?id=31702.

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    return !CFP->getValueAPF().isNegative() ||
           (!SignBitOnly && CFP->getValueAPF().isZero());
  }

  // Handle vector of constants.
  if (auto *CV = dyn_cast<Constant>(V)) {
    if (CV->getType()->isVectorTy()) {
      unsigned NumElts = CV->getType()->getVectorNumElements();
      for (unsigned i = 0; i != NumElts; ++i) {
        auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
        if (!CFP)
          return false;
        if (CFP->getValueAPF().isNegative() &&
            (SignBitOnly || !CFP->getValueAPF().isZero()))
          return false;
      }

      // All non-negative ConstantFPs.
      return true;
    }
  }

  if (Depth == MaxDepth)
    return false;  // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (!I)
    return false;

  switch (I->getOpcode()) {
  default:
    break;
  // Unsigned integers are always nonnegative.
  case Instruction::UIToFP:
    return true;
  case Instruction::FMul:
    // x*x is always non-negative or a NaN.
    if (I->getOperand(0) == I->getOperand(1) &&
        (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
      return true;

    LLVM_FALLTHROUGH;
  case Instruction::FAdd:
  case Instruction::FDiv:
  case Instruction::FRem:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Select:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::FPExt:
  case Instruction::FPTrunc:
    // Widening/narrowing never change sign.
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector.
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Call:
    const auto *CI = cast<CallInst>(I);
    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
    switch (IID) {
    default:
      break;
    case Intrinsic::maxnum:
      return (isKnownNeverNaN(I->getOperand(0), TLI) &&
              cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
                                              SignBitOnly, Depth + 1)) ||
             (isKnownNeverNaN(I->getOperand(1), TLI) &&
              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
                                              SignBitOnly, Depth + 1));

    case Intrinsic::maximum:
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                             Depth + 1) ||
             cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                             Depth + 1);
    case Intrinsic::minnum:
    case Intrinsic::minimum:
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                             Depth + 1) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                             Depth + 1);
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::fabs:
      return true;

    case Intrinsic::sqrt:
      // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
      if (!SignBitOnly)
        return true;
      return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
                                 CannotBeNegativeZero(CI->getOperand(0), TLI));

    case Intrinsic::powi:
      if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // powi(x,n) is non-negative if n is even.
        if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
          return true;
      }
      // TODO: This is not correct. Given that exp is an integer, here are the
      // ways that pow can return a negative value:
      //
      //   pow(x, exp)    --> negative if exp is odd and x is negative.
      //   pow(-0, exp)   --> -inf if exp is negative odd.
      //   pow(-0, exp)   --> -0 if exp is positive odd.
      //   pow(-inf, exp) --> -0 if exp is negative odd.
      //   pow(-inf, exp) --> -inf if exp is positive odd.
      //
      // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
      // but we must return false if x == -0. Unfortunately we do not currently
      // have a way of expressing this constraint. See details in
      // https://llvm.org/bugs/show_bug.cgi?id=31702.
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                             Depth + 1);

    case Intrinsic::fma:
    case Intrinsic::fmuladd:
      // x*x+y is non-negative if y is non-negative.
      return I->getOperand(0) == I->getOperand(1) &&
             (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
                                             Depth + 1);
    }
    break;
  }
  return false;
}

bool llvm::CannotBeOrderedLessThanZero(const Value *V,
                                       const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
}

bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
}

bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
                           unsigned Depth) {
  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");

  // If we're told that NaNs won't happen, assume they won't.
  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
    if (FPMathOp->hasNoNaNs())
      return true;

  // Handle scalar constants.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->isNaN();

  if (Depth == MaxDepth)
    return false;

  if (auto *Inst = dyn_cast<Instruction>(V)) {
    switch (Inst->getOpcode()) {
    case Instruction::FAdd:
    case Instruction::FMul:
    case Instruction::FSub:
    case Instruction::FDiv:
    case Instruction::FRem: {
      // TODO: Need isKnownNeverInfinity
      return false;
    }
    case Instruction::Select: {
      return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
             isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
    }
    case Instruction::SIToFP:
    case Instruction::UIToFP:
      return true;
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
    default:
      break;
    }
  }

  if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::canonicalize:
    case Intrinsic::fabs:
    case Intrinsic::copysign:
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::floor:
    case Intrinsic::ceil:
    case Intrinsic::trunc:
    case Intrinsic::rint:
    case Intrinsic::nearbyint:
    case Intrinsic::round:
      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
    case Intrinsic::sqrt:
      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
             CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
    default:
      return false;
    }
  }

  // Bail out for constant expressions, but try to handle vector constants.
  if (!V->getType()->isVectorTy() || !isa<Constant>(V))
    return false;

  // For vectors, verify that each element is not NaN.
  unsigned NumElts = V->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
    if (!Elt)
      return false;
    if (isa<UndefValue>(Elt))
      continue;
    auto *CElt = dyn_cast<ConstantFP>(Elt);
    if (!CElt || CElt->isNaN())
      return false;
  }
  // All elements were confirmed not-NaN or undefined.
  return true;
}

Value *llvm::isBytewiseValue(Value *V) {

  // All byte-wide stores are splatable, even of arbitrary variables.
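  // For illustration (made-up constants): i32 0x01010101 yields i8 1,
  // float 0.0 yields i8 0, and non-splat i16 0x1234 yields nullptr.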
3113   if (V->getType()->isIntegerTy(8))
3114     return V;
3115
3116   LLVMContext &Ctx = V->getContext();
3117
3118   // Undef bytes are "don't care"; they can match any byte value.
3119   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3120   if (isa<UndefValue>(V))
3121     return UndefInt8;
3122
3123   Constant *C = dyn_cast<Constant>(V);
3124   if (!C) {
3125     // Conceptually, we could handle things like:
3126     //   %a = zext i8 %X to i16
3127     //   %b = shl i16 %a, 8
3128     //   %c = or i16 %a, %b
3129     // but until there is an example that actually needs this, it doesn't seem
3130     // worth worrying about.
3131     return nullptr;
3132   }
3133
3134   // Handle 'null' ConstantAggregateZero etc.
3135   if (C->isNullValue())
3136     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3137
3138   // Constant floating-point values can be handled as integer values if the
3139   // corresponding integer value is "byteable". An important case is 0.0.
3140   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3141     Type *Ty = nullptr;
3142     if (CFP->getType()->isHalfTy())
3143       Ty = Type::getInt16Ty(Ctx);
3144     else if (CFP->getType()->isFloatTy())
3145       Ty = Type::getInt32Ty(Ctx);
3146     else if (CFP->getType()->isDoubleTy())
3147       Ty = Type::getInt64Ty(Ctx);
3148     // Don't handle long double formats, which have strange constraints.
3149     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty)) : nullptr;
3150   }
3151
3152   // We can handle constant integers whose width is a multiple of 8 bits.
3153   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3154     if (CI->getBitWidth() % 8 == 0) {
3155       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3156       if (!CI->getValue().isSplat(8))
3157         return nullptr;
3158       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3159     }
3160   }
3161
3162   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3163     if (LHS == RHS)
3164       return LHS;
3165     if (!LHS || !RHS)
3166       return nullptr;
3167     if (LHS == UndefInt8)
3168       return RHS;
3169     if (RHS == UndefInt8)
3170       return LHS;
3171     return nullptr;
3172   };
3173
3174   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3175     Value *Val = UndefInt8;
3176     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3177       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I)))))
3178         return nullptr;
3179     return Val;
3180   }
3181
3182   if (isa<ConstantVector>(C)) {
3183     Constant *Splat = cast<ConstantVector>(C)->getSplatValue();
3184     return Splat ? isBytewiseValue(Splat) : nullptr;
3185   }
3186
3187   if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
3188     Value *Val = UndefInt8;
3189     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3190       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I)))))
3191         return nullptr;
3192     return Val;
3193   }
3194
3195   // Don't try to handle the handful of other constants.
3196   return nullptr;
3197 }
3198
3199 // This is the recursive version of BuildSubAggregate. It takes a few different
3200 // arguments. Idxs is the index within the nested struct From that we are
3201 // looking at now (which is of type IndexedType). IdxSkip is the number of
3202 // indices from Idxs that should be left out when inserting into the resulting
3203 // struct. To is the result struct built so far, and new insertvalue
3204 // instructions build on that.
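// For example (illustrative): with Idxs = {1} and IdxSkip = 1, the recursion
// walks the element at index 1 of From; a leaf found at {1, 0} in From is
// re-inserted into To at index {0}, i.e. the first IdxSkip indices are
// dropped on the result side.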
3205 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, 3206 SmallVectorImpl<unsigned> &Idxs, 3207 unsigned IdxSkip, 3208 Instruction *InsertBefore) { 3209 StructType *STy = dyn_cast<StructType>(IndexedType); 3210 if (STy) { 3211 // Save the original To argument so we can modify it 3212 Value *OrigTo = To; 3213 // General case, the type indexed by Idxs is a struct 3214 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 3215 // Process each struct element recursively 3216 Idxs.push_back(i); 3217 Value *PrevTo = To; 3218 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, 3219 InsertBefore); 3220 Idxs.pop_back(); 3221 if (!To) { 3222 // Couldn't find any inserted value for this index? Cleanup 3223 while (PrevTo != OrigTo) { 3224 InsertValueInst* Del = cast<InsertValueInst>(PrevTo); 3225 PrevTo = Del->getAggregateOperand(); 3226 Del->eraseFromParent(); 3227 } 3228 // Stop processing elements 3229 break; 3230 } 3231 } 3232 // If we successfully found a value for each of our subaggregates 3233 if (To) 3234 return To; 3235 } 3236 // Base case, the type indexed by SourceIdxs is not a struct, or not all of 3237 // the struct's elements had a value that was inserted directly. In the latter 3238 // case, perhaps we can't determine each of the subelements individually, but 3239 // we might be able to find the complete struct somewhere. 3240 3241 // Find the value that is at that particular spot 3242 Value *V = FindInsertedValue(From, Idxs); 3243 3244 if (!V) 3245 return nullptr; 3246 3247 // Insert the value in the new (sub) aggregate 3248 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), 3249 "tmp", InsertBefore); 3250 } 3251 3252 // This helper takes a nested struct and extracts a part of it (which is again a 3253 // struct) into a new value. For example, given the struct: 3254 // { a, { b, { c, d }, e } } 3255 // and the indices "1, 1" this returns 3256 // { c, d }. 3257 // 3258 // It does this by inserting an insertvalue for each element in the resulting 3259 // struct, as opposed to just inserting a single struct. This will only work if 3260 // each of the elements of the substruct are known (ie, inserted into From by an 3261 // insertvalue instruction somewhere). 3262 // 3263 // All inserted insertvalue instructions are inserted before InsertBefore 3264 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, 3265 Instruction *InsertBefore) { 3266 assert(InsertBefore && "Must have someplace to insert!"); 3267 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), 3268 idx_range); 3269 Value *To = UndefValue::get(IndexedType); 3270 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); 3271 unsigned IdxSkip = Idxs.size(); 3272 3273 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); 3274 } 3275 3276 /// Given an aggregate and a sequence of indices, see if the scalar value 3277 /// indexed is already around as a register, for example if it was inserted 3278 /// directly into the aggregate. 3279 /// 3280 /// If InsertBefore is not null, this function will duplicate (modified) 3281 /// insertvalues when a part of a nested struct is extracted. 3282 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, 3283 Instruction *InsertBefore) { 3284 // Nothing to index? Just return V then (this is useful at the end of our 3285 // recursion). 3286 if (idx_range.empty()) 3287 return V; 3288 // We have indices, so V should have an indexable type. 
3289   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3290          "Not looking at a struct or array?");
3291   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3292          "Invalid indices for type?");
3293
3294   if (Constant *C = dyn_cast<Constant>(V)) {
3295     C = C->getAggregateElement(idx_range[0]);
3296     if (!C) return nullptr;
3297     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3298   }
3299
3300   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3301     // Loop over the indices of the insertvalue instruction in parallel with
3302     // the requested indices.
3303     const unsigned *req_idx = idx_range.begin();
3304     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3305          i != e; ++i, ++req_idx) {
3306       if (req_idx == idx_range.end()) {
3307         // We can't handle this without inserting insertvalues
3308         if (!InsertBefore)
3309           return nullptr;
3310
3311         // The requested index identifies a part of a nested aggregate. Handle
3312         // this specially. For example,
3313         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3314         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3315         // %C = extractvalue {i32, { i32, i32 } } %B, 1
3316         // This can be changed into
3317         // %A = insertvalue {i32, i32 } undef, i32 10, 0
3318         // %C = insertvalue {i32, i32 } %A, i32 11, 1
3319         // which allows the unused 0,0 element from the nested struct to be
3320         // removed.
3321         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3322                                  InsertBefore);
3323       }
3324
3325       // This insertvalue inserts something other than what we are looking
3326       // for. See if the (aggregate) value inserted into has the value we are
3327       // looking for, then.
3328       if (*req_idx != *i)
3329         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3330                                  InsertBefore);
3331     }
3332     // If we end up here, the indices of the insertvalue match with those
3333     // requested (though possibly only partially). Now we recursively look at
3334     // the inserted value, passing any remaining indices.
3335     return FindInsertedValue(I->getInsertedValueOperand(),
3336                              makeArrayRef(req_idx, idx_range.end()),
3337                              InsertBefore);
3338   }
3339
3340   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3341     // If we're extracting a value from an aggregate that was itself extracted
3342     // from something else, we can extract from that something else directly
3343     // instead. However, we will need to chain I's indices with the requested
3344     // indices.
3345
3346     // Calculate the number of indices required
3347     unsigned size = I->getNumIndices() + idx_range.size();
3348     // Allocate some space to put the new indices in
3349     SmallVector<unsigned, 5> Idxs;
3350     Idxs.reserve(size);
3351     // Add indices from the extract value instruction
3352     Idxs.append(I->idx_begin(), I->idx_end());
3353
3354     // Add requested indices
3355     Idxs.append(idx_range.begin(), idx_range.end());
3356
3357     assert(Idxs.size() == size && "Number of indices added not correct?");
3358
3359     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3360   }
3361   // Otherwise, we don't know (such as when extracting from a function return
3362   // value or a load instruction).
3363   return nullptr;
3364 }
3365
3366 /// Analyze the specified pointer to see if it can be expressed as a base
3367 /// pointer plus a constant offset. Return the base and offset to the caller.
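/// For example (illustrative IR): for
///   %p = getelementptr inbounds i32, i32* %base, i64 3
/// this returns %base and sets Offset to 12 (three 4-byte elements).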
3368 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3369                                               const DataLayout &DL) {
3370   unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3371   APInt ByteOffset(BitWidth, 0);
3372
3373   // We walk up the defs but use a visited set to handle unreachable code. In
3374   // that case, we stop after accumulating the cycle once (not that it
3375   // matters).
3376   SmallPtrSet<Value *, 16> Visited;
3377   while (Visited.insert(Ptr).second) {
3378     if (Ptr->getType()->isVectorTy())
3379       break;
3380
3381     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3382       // If one of the values we have visited is an addrspacecast, then
3383       // the pointer type of this GEP may be different from the type
3384       // of the Ptr parameter which was passed to this function. This
3385       // means when we construct GEPOffset, we need to use the size
3386       // of GEP's pointer type rather than the size of the original
3387       // pointer type.
3388       APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3389       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3390         break;
3391
3392       APInt OrigByteOffset(ByteOffset);
3393       ByteOffset += GEPOffset.sextOrTrunc(ByteOffset.getBitWidth());
3394       if (ByteOffset.getMinSignedBits() > 64) {
3395         // Stop traversal if the pointer offset wouldn't fit into int64_t
3396         // (this should be removed if Offset is updated to an APInt).
3397         ByteOffset = OrigByteOffset;
3398         break;
3399       }
3400
3401       Ptr = GEP->getPointerOperand();
3402     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3403                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3404       Ptr = cast<Operator>(Ptr)->getOperand(0);
3405     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3406       if (GA->isInterposable())
3407         break;
3408       Ptr = GA->getAliasee();
3409     } else {
3410       break;
3411     }
3412   }
3413   Offset = ByteOffset.getSExtValue();
3414   return Ptr;
3415 }
3416
3417 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3418                                        unsigned CharSize) {
3419   // Make sure the GEP has exactly three arguments.
3420   if (GEP->getNumOperands() != 3)
3421     return false;
3422
3423   // Make sure the index-ee is a pointer to an array of \p CharSize-bit
3424   // integers.
3425   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3426   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3427     return false;
3428
3429   // Check to make sure that the first operand of the GEP is an integer and
3430   // has value 0 so that we are sure we're indexing into the initializer.
3431   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3432   if (!FirstIdx || !FirstIdx->isZero())
3433     return false;
3434
3435   return true;
3436 }
3437
3438 bool llvm::getConstantDataArrayInfo(const Value *V,
3439                                     ConstantDataArraySlice &Slice,
3440                                     unsigned ElementSize, uint64_t Offset) {
3441   assert(V);
3442
3443   // Look through bitcast instructions and geps.
3444   V = V->stripPointerCasts();
3445
3446   // If the value is a GEP instruction or constant expression, treat it as an
3447   // offset.
3448   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3449     // The GEP operator should be based on a pointer to a string constant, and
3450     // should be indexing into that string constant.
3451     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3452       return false;
3453
3454     // If the second index isn't a ConstantInt, then this is a variable index
3455     // into the array. If this occurs, we can't say anything meaningful about
3456     // the string.
3457     uint64_t StartIdx = 0;
3458     if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3459       StartIdx = CI->getZExtValue();
3460     else
3461       return false;
3462     return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3463                                     StartIdx + Offset);
3464   }
3465
3466   // The GEP, whether a constant expression or an instruction, must reference
3467   // a global variable that is a constant and is initialized. The referenced
3468   // constant initializer is the array that we'll use for optimization.
3469   const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3470   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3471     return false;
3472
3473   const ConstantDataArray *Array;
3474   ArrayType *ArrayTy;
3475   if (GV->getInitializer()->isNullValue()) {
3476     Type *GVTy = GV->getValueType();
3477     if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
3478       // A zeroinitializer for the array; there is no ConstantDataArray.
3479       Array = nullptr;
3480     } else {
3481       const DataLayout &DL = GV->getParent()->getDataLayout();
3482       uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3483       uint64_t Length = SizeInBytes / (ElementSize / 8);
3484       if (Length <= Offset)
3485         return false;
3486
3487       Slice.Array = nullptr;
3488       Slice.Offset = 0;
3489       Slice.Length = Length - Offset;
3490       return true;
3491     }
3492   } else {
3493     // This must be a ConstantDataArray.
3494     Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3495     if (!Array)
3496       return false;
3497     ArrayTy = Array->getType();
3498   }
3499   if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3500     return false;
3501
3502   uint64_t NumElts = ArrayTy->getArrayNumElements();
3503   if (Offset > NumElts)
3504     return false;
3505
3506   Slice.Array = Array;
3507   Slice.Offset = Offset;
3508   Slice.Length = NumElts - Offset;
3509   return true;
3510 }
3511
3512 /// This function extracts the constant string pointed to by V, starting at
3513 /// byte Offset. If successful, it returns true and stores the string in Str;
3514 /// if unsuccessful, it returns false.
3515 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3516                                  uint64_t Offset, bool TrimAtNul) {
3517   ConstantDataArraySlice Slice;
3518   if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3519     return false;
3520
3521   if (Slice.Array == nullptr) {
3522     if (TrimAtNul) {
3523       Str = StringRef();
3524       return true;
3525     }
3526     if (Slice.Length == 1) {
3527       Str = StringRef("", 1);
3528       return true;
3529     }
3530     // We cannot instantiate a StringRef as we do not have an appropriate
3531     // string of 0s at hand.
3532     return false;
3533   }
3534
3535   // Start out with the entire array in the StringRef.
3536   Str = Slice.Array->getAsString();
3537   // Skip over 'offset' bytes.
3538   Str = Str.substr(Slice.Offset);
3539
3540   if (TrimAtNul) {
3541     // Trim off the \0 and anything after it. If the array is not nul
3542     // terminated, we just return the rest of the string. The client may know
3543     // some other way that the string is length-bound.
3544     Str = Str.substr(0, Str.find('\0'));
3545   }
3546   return true;
3547 }
3548
3549 // These next two are very similar to the above, but also look through PHI
3550 // nodes.
3551 // TODO: See if we can integrate these two together.
3552
3553 /// If we can compute the length of the string pointed to by
3554 /// the specified pointer, return 'len+1'. If we can't, return 0.
3555 static uint64_t GetStringLengthH(const Value *V,
3556                                  SmallPtrSetImpl<const PHINode*> &PHIs,
3557                                  unsigned CharSize) {
3558   // Look through noop bitcast instructions.
3559   V = V->stripPointerCasts();
3560
3561   // If this is a PHI node, there are two cases: either we have already seen it
3562   // or we haven't.
3563   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3564     if (!PHIs.insert(PN).second)
3565       return ~0ULL;  // already in the set.
3566
3567     // If it was new, see if all the input strings are the same length.
3568     uint64_t LenSoFar = ~0ULL;
3569     for (Value *IncValue : PN->incoming_values()) {
3570       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3571       if (Len == 0) return 0; // Unknown length -> unknown.
3572
3573       if (Len == ~0ULL) continue;
3574
3575       if (Len != LenSoFar && LenSoFar != ~0ULL)
3576         return 0;    // Disagree -> unknown.
3577       LenSoFar = Len;
3578     }
3579
3580     // Success, all agree.
3581     return LenSoFar;
3582   }
3583
3584   // strlen(select(c,x,y)) is known only when strlen(x) and strlen(y) agree.
3585   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3586     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3587     if (Len1 == 0) return 0;
3588     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3589     if (Len2 == 0) return 0;
3590     if (Len1 == ~0ULL) return Len2;
3591     if (Len2 == ~0ULL) return Len1;
3592     if (Len1 != Len2) return 0;
3593     return Len1;
3594   }
3595
3596   // Otherwise, see if we can read the string.
3597   ConstantDataArraySlice Slice;
3598   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3599     return 0;
3600
3601   if (Slice.Array == nullptr)
3602     return 1;
3603
3604   // Search for nul characters.
3605   unsigned NullIndex = 0;
3606   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3607     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3608       break;
3609   }
3610
3611   return NullIndex + 1;
3612 }
3613
3614 /// If we can compute the length of the string pointed to by
3615 /// the specified pointer, return 'len+1'. If we can't, return 0.
3616 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3617   if (!V->getType()->isPointerTy())
3618     return 0;
3619
3620   SmallPtrSet<const PHINode*, 32> PHIs;
3621   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3622   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
3623   // return 1, the length of an empty string.
3624   return Len == ~0ULL ? 1 : Len;
3625 }
3626
3627 const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
3628   assert(Call &&
3629          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3630   if (const Value *RV = Call->getReturnedArgOperand())
3631     return RV;
3632   // This can be used only as an aliasing property.
3633   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
3634     return Call->getArgOperand(0);
3635   return nullptr;
3636 }
3637
3638 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3639     const CallBase *Call) {
3640   return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3641          Call->getIntrinsicID() == Intrinsic::strip_invariant_group;
3642 }
3643
3644 /// \p PN defines a loop-variant pointer to an object. Check if the
3645 /// previous iteration of the loop was referring to the same object as \p PN.
3646 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3647                                          const LoopInfo *LI) {
3648   // Find the loop-defined value.
3649   Loop *L = LI->getLoopFor(PN->getParent());
3650   if (PN->getNumIncomingValues() != 2)
3651     return true;
3652
3653   // Find the value from the previous iteration.
3654   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3655   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3656     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3657   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3658     return true;
3659
3660   // If a new pointer is loaded in the loop, the pointer references a different
3661   // object in every iteration. E.g.:
3662   //    for (i)
3663   //       int *p = a[i];
3664   //       ...
3665   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3666     if (!L->isLoopInvariant(Load->getPointerOperand()))
3667       return false;
3668   return true;
3669 }
3670
3671 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3672                                  unsigned MaxLookup) {
3673   if (!V->getType()->isPointerTy())
3674     return V;
3675   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3676     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3677       V = GEP->getPointerOperand();
3678     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3679                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3680       V = cast<Operator>(V)->getOperand(0);
3681     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3682       if (GA->isInterposable())
3683         return V;
3684       V = GA->getAliasee();
3685     } else if (isa<AllocaInst>(V)) {
3686       // An alloca can't be further simplified.
3687       return V;
3688     } else {
3689       if (auto *Call = dyn_cast<CallBase>(V)) {
3690         // CaptureTracking can know about special capturing properties of some
3691         // intrinsics like launder.invariant.group that can't be expressed with
3692         // attributes, but have properties like returning an aliasing pointer.
3693         // Because some analyses may assume that a nocapture pointer is not
3694         // returned from certain special intrinsics (since the function would
3695         // otherwise have to be marked with the returned attribute), it is
3696         // crucial to use this function, which is kept in sync with
3697         // CaptureTracking. Not using it may cause weird miscompilations where
3698         // two aliasing pointers are assumed to be noalias.
3699         if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
3700           V = RP;
3701           continue;
3702         }
3703       }
3704
3705       // See if InstructionSimplify knows any relevant tricks.
3706       if (Instruction *I = dyn_cast<Instruction>(V))
3707         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3708         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3709           V = Simplified;
3710           continue;
3711         }
3712
3713       return V;
3714     }
3715     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3716   }
3717   return V;
3718 }
3719
3720 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3721                                 const DataLayout &DL, LoopInfo *LI,
3722                                 unsigned MaxLookup) {
3723   SmallPtrSet<Value *, 4> Visited;
3724   SmallVector<Value *, 4> Worklist;
3725   Worklist.push_back(V);
3726   do {
3727     Value *P = Worklist.pop_back_val();
3728     P = GetUnderlyingObject(P, DL, MaxLookup);
3729
3730     if (!Visited.insert(P).second)
3731       continue;
3732
3733     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3734       Worklist.push_back(SI->getTrueValue());
3735       Worklist.push_back(SI->getFalseValue());
3736       continue;
3737     }
3738
3739     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3740       // If this PHI changes the underlying object in every iteration of the
3741       // loop, don't look through it. Consider:
3742       //   int **A;
3743       //   for (i) {
3744       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3745       //     Curr = A[i];
3746       //     *Prev, *Curr;
3747       //
3748       // Prev is tracking Curr one iteration behind so they refer to different
3749       // underlying objects.
3750       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3751           isSameUnderlyingObjectInLoop(PN, LI))
3752         for (Value *IncValue : PN->incoming_values())
3753           Worklist.push_back(IncValue);
3754       continue;
3755     }
3756
3757     Objects.push_back(P);
3758   } while (!Worklist.empty());
3759 }
3760
3761 /// This is the function that does the work of looking through basic
3762 /// ptrtoint+arithmetic+inttoptr sequences.
3763 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3764   do {
3765     if (const Operator *U = dyn_cast<Operator>(V)) {
3766       // If we find a ptrtoint, we can transfer control back to the
3767       // regular getUnderlyingObjectFromInt.
3768       if (U->getOpcode() == Instruction::PtrToInt)
3769         return U->getOperand(0);
3770       // If we find an add of a constant, a multiplied value, or a phi, it's
3771       // likely that the other operand will lead us to the base
3772       // object. We don't have to worry about the case where the
3773       // object address is somehow being computed by the multiply,
3774       // because our callers only care when the result is an
3775       // identifiable object.
3776       if (U->getOpcode() != Instruction::Add ||
3777           (!isa<ConstantInt>(U->getOperand(1)) &&
3778            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3779            !isa<PHINode>(U->getOperand(1))))
3780         return V;
3781       V = U->getOperand(0);
3782     } else {
3783       return V;
3784     }
3785     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3786   } while (true);
3787 }
3788
3789 /// This is a wrapper around GetUnderlyingObjects that adds support for basic
3790 /// ptrtoint+arithmetic+inttoptr sequences.
3791 /// It returns false if an unidentified object is found in GetUnderlyingObjects.
3792 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3793                                           SmallVectorImpl<Value *> &Objects,
3794                                           const DataLayout &DL) {
3795   SmallPtrSet<const Value *, 16> Visited;
3796   SmallVector<const Value *, 4> Working(1, V);
3797   do {
3798     V = Working.pop_back_val();
3799
3800     SmallVector<Value *, 4> Objs;
3801     GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3802
3803     for (Value *V : Objs) {
3804       if (!Visited.insert(V).second)
3805         continue;
3806       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3807         const Value *O =
3808             getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3809         if (O->getType()->isPointerTy()) {
3810           Working.push_back(O);
3811           continue;
3812         }
3813       }
3814       // If GetUnderlyingObjects fails to find an identifiable object,
3815       // getUnderlyingObjectsForCodeGen also fails for safety.
3816       if (!isIdentifiedObject(V)) {
3817         Objects.clear();
3818         return false;
3819       }
3820       Objects.push_back(const_cast<Value *>(V));
3821     }
3822   } while (!Working.empty());
3823   return true;
3824 }
3825
3826 /// Return true if the only users of this pointer are lifetime markers.
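/// For example (illustrative IR), this returns true when %p is used only by
///   call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)
///   call void @llvm.lifetime.end.p0i8(i64 16, i8* %p)
/// and false as soon as %p has any other user, such as a load or store.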
3827 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 3828 for (const User *U : V->users()) { 3829 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3830 if (!II) return false; 3831 3832 if (!II->isLifetimeStartOrEnd()) 3833 return false; 3834 } 3835 return true; 3836 } 3837 3838 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 3839 const Instruction *CtxI, 3840 const DominatorTree *DT) { 3841 const Operator *Inst = dyn_cast<Operator>(V); 3842 if (!Inst) 3843 return false; 3844 3845 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 3846 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 3847 if (C->canTrap()) 3848 return false; 3849 3850 switch (Inst->getOpcode()) { 3851 default: 3852 return true; 3853 case Instruction::UDiv: 3854 case Instruction::URem: { 3855 // x / y is undefined if y == 0. 3856 const APInt *V; 3857 if (match(Inst->getOperand(1), m_APInt(V))) 3858 return *V != 0; 3859 return false; 3860 } 3861 case Instruction::SDiv: 3862 case Instruction::SRem: { 3863 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 3864 const APInt *Numerator, *Denominator; 3865 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 3866 return false; 3867 // We cannot hoist this division if the denominator is 0. 3868 if (*Denominator == 0) 3869 return false; 3870 // It's safe to hoist if the denominator is not 0 or -1. 3871 if (*Denominator != -1) 3872 return true; 3873 // At this point we know that the denominator is -1. It is safe to hoist as 3874 // long we know that the numerator is not INT_MIN. 3875 if (match(Inst->getOperand(0), m_APInt(Numerator))) 3876 return !Numerator->isMinSignedValue(); 3877 // The numerator *might* be MinSignedValue. 3878 return false; 3879 } 3880 case Instruction::Load: { 3881 const LoadInst *LI = cast<LoadInst>(Inst); 3882 if (!LI->isUnordered() || 3883 // Speculative load may create a race that did not exist in the source. 3884 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) || 3885 // Speculative load may load data from dirty regions. 3886 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) || 3887 LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress)) 3888 return false; 3889 const DataLayout &DL = LI->getModule()->getDataLayout(); 3890 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(), 3891 LI->getAlignment(), DL, CtxI, DT); 3892 } 3893 case Instruction::Call: { 3894 auto *CI = cast<const CallInst>(Inst); 3895 const Function *Callee = CI->getCalledFunction(); 3896 3897 // The called function could have undefined behavior or side-effects, even 3898 // if marked readnone nounwind. 
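    // The speculatable attribute is the one guarantee that a call is free of
    // undefined behavior and side effects for arbitrary arguments, so it is
    // all we can rely on here.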
3899 return Callee && Callee->isSpeculatable(); 3900 } 3901 case Instruction::VAArg: 3902 case Instruction::Alloca: 3903 case Instruction::Invoke: 3904 case Instruction::PHI: 3905 case Instruction::Store: 3906 case Instruction::Ret: 3907 case Instruction::Br: 3908 case Instruction::IndirectBr: 3909 case Instruction::Switch: 3910 case Instruction::Unreachable: 3911 case Instruction::Fence: 3912 case Instruction::AtomicRMW: 3913 case Instruction::AtomicCmpXchg: 3914 case Instruction::LandingPad: 3915 case Instruction::Resume: 3916 case Instruction::CatchSwitch: 3917 case Instruction::CatchPad: 3918 case Instruction::CatchRet: 3919 case Instruction::CleanupPad: 3920 case Instruction::CleanupRet: 3921 return false; // Misc instructions which have effects 3922 } 3923 } 3924 3925 bool llvm::mayBeMemoryDependent(const Instruction &I) { 3926 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 3927 } 3928 3929 OverflowResult llvm::computeOverflowForUnsignedMul( 3930 const Value *LHS, const Value *RHS, const DataLayout &DL, 3931 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 3932 bool UseInstrInfo) { 3933 // Multiplying n * m significant bits yields a result of n + m significant 3934 // bits. If the total number of significant bits does not exceed the 3935 // result bit width (minus 1), there is no overflow. 3936 // This means if we have enough leading zero bits in the operands 3937 // we can guarantee that the result does not overflow. 3938 // Ref: "Hacker's Delight" by Henry Warren 3939 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3940 KnownBits LHSKnown(BitWidth); 3941 KnownBits RHSKnown(BitWidth); 3942 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr, 3943 UseInstrInfo); 3944 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr, 3945 UseInstrInfo); 3946 // Note that underestimating the number of zero bits gives a more 3947 // conservative answer. 3948 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() + 3949 RHSKnown.countMinLeadingZeros(); 3950 // First handle the easy case: if we have enough zero bits there's 3951 // definitely no overflow. 3952 if (ZeroBits >= BitWidth) 3953 return OverflowResult::NeverOverflows; 3954 3955 // Get the largest possible values for each operand. 3956 APInt LHSMax = ~LHSKnown.Zero; 3957 APInt RHSMax = ~RHSKnown.Zero; 3958 3959 // We know the multiply operation doesn't overflow if the maximum values for 3960 // each operand will not overflow after we multiply them together. 3961 bool MaxOverflow; 3962 (void)LHSMax.umul_ov(RHSMax, MaxOverflow); 3963 if (!MaxOverflow) 3964 return OverflowResult::NeverOverflows; 3965 3966 // We know it always overflows if multiplying the smallest possible values for 3967 // the operands also results in overflow. 3968 bool MinOverflow; 3969 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow); 3970 if (MinOverflow) 3971 return OverflowResult::AlwaysOverflows; 3972 3973 return OverflowResult::MayOverflow; 3974 } 3975 3976 OverflowResult 3977 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS, 3978 const DataLayout &DL, AssumptionCache *AC, 3979 const Instruction *CxtI, 3980 const DominatorTree *DT, bool UseInstrInfo) { 3981 // Multiplying n * m significant bits yields a result of n + m significant 3982 // bits. If the total number of significant bits does not exceed the 3983 // result bit width (minus 1), there is no overflow. 
3984 // This means if we have enough leading sign bits in the operands 3985 // we can guarantee that the result does not overflow. 3986 // Ref: "Hacker's Delight" by Henry Warren 3987 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3988 3989 // Note that underestimating the number of sign bits gives a more 3990 // conservative answer. 3991 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) + 3992 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT); 3993 3994 // First handle the easy case: if we have enough sign bits there's 3995 // definitely no overflow. 3996 if (SignBits > BitWidth + 1) 3997 return OverflowResult::NeverOverflows; 3998 3999 // There are two ambiguous cases where there can be no overflow: 4000 // SignBits == BitWidth + 1 and 4001 // SignBits == BitWidth 4002 // The second case is difficult to check, therefore we only handle the 4003 // first case. 4004 if (SignBits == BitWidth + 1) { 4005 // It overflows only when both arguments are negative and the true 4006 // product is exactly the minimum negative number. 4007 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 4008 // For simplicity we just check if at least one side is not negative. 4009 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, 4010 nullptr, UseInstrInfo); 4011 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, 4012 nullptr, UseInstrInfo); 4013 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) 4014 return OverflowResult::NeverOverflows; 4015 } 4016 return OverflowResult::MayOverflow; 4017 } 4018 4019 OverflowResult llvm::computeOverflowForUnsignedAdd( 4020 const Value *LHS, const Value *RHS, const DataLayout &DL, 4021 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 4022 bool UseInstrInfo) { 4023 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, 4024 nullptr, UseInstrInfo); 4025 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) { 4026 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, 4027 nullptr, UseInstrInfo); 4028 4029 if (LHSKnown.isNegative() && RHSKnown.isNegative()) { 4030 // The sign bit is set in both cases: this MUST overflow. 4031 return OverflowResult::AlwaysOverflows; 4032 } 4033 4034 if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) { 4035 // The sign bit is clear in both cases: this CANNOT overflow. 4036 return OverflowResult::NeverOverflows; 4037 } 4038 } 4039 4040 return OverflowResult::MayOverflow; 4041 } 4042 4043 /// Return true if we can prove that adding the two values of the 4044 /// knownbits will not overflow. 4045 /// Otherwise return false. 4046 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown, 4047 const KnownBits &RHSKnown) { 4048 // Addition of two 2's complement numbers having opposite signs will never 4049 // overflow. 4050 if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) || 4051 (LHSKnown.isNonNegative() && RHSKnown.isNegative())) 4052 return true; 4053 4054 // If either of the values is known to be non-negative, adding them can only 4055 // overflow if the second is also non-negative, so we can assume that. 4056 // Two non-negative numbers will only overflow if there is a carry to the 4057 // sign bit, so we can check if even when the values are as big as possible 4058 // there is no overflow to the sign bit. 
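  // For example (illustrative, i8): if both operands have their top two bits
  // known zero, the largest possible sum is 0x3f + 0x3f = 0x7e, whose sign
  // bit is clear, so the addition cannot overflow.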
4059   if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
4060     APInt MaxLHS = ~LHSKnown.Zero;
4061     MaxLHS.clearSignBit();
4062     APInt MaxRHS = ~RHSKnown.Zero;
4063     MaxRHS.clearSignBit();
4064     APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
4065     return Result.isSignBitClear();
4066   }
4067
4068   // If either of the values is known to be negative, adding them can only
4069   // overflow if the second is also negative, so we can assume that.
4070   // Two negative numbers will only overflow if there is no carry to the sign
4071   // bit, so we can check if even when the values are as small as possible
4072   // there is overflow to the sign bit.
4073   if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
4074     APInt MinLHS = LHSKnown.One;
4075     MinLHS.clearSignBit();
4076     APInt MinRHS = RHSKnown.One;
4077     MinRHS.clearSignBit();
4078     APInt Result = std::move(MinLHS) + std::move(MinRHS);
4079     return Result.isSignBitSet();
4080   }
4081
4082   // If we reached here it means that we know nothing about the sign bits.
4083   // In this case we can't know if there will be an overflow, since by
4084   // changing the sign bits any two values can be made to overflow.
4085   return false;
4086 }
4087
4088 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4089                                                   const Value *RHS,
4090                                                   const AddOperator *Add,
4091                                                   const DataLayout &DL,
4092                                                   AssumptionCache *AC,
4093                                                   const Instruction *CxtI,
4094                                                   const DominatorTree *DT) {
4095   if (Add && Add->hasNoSignedWrap()) {
4096     return OverflowResult::NeverOverflows;
4097   }
4098
4099   // If LHS and RHS each have at least two sign bits, the addition will look
4100   // like
4101   //
4102   // XX..... +
4103   // YY.....
4104   //
4105   // If the carry into the most significant position is 0, X and Y can't both
4106   // be 1 and therefore the carry out of the addition is also 0.
4107   //
4108   // If the carry into the most significant position is 1, X and Y can't both
4109   // be 0 and therefore the carry out of the addition is also 1.
4110   //
4111   // Since the carry into the most significant position is always equal to
4112   // the carry out of the addition, there is no signed overflow.
4113   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4114       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4115     return OverflowResult::NeverOverflows;
4116
4117   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
4118   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
4119
4120   if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
4121     return OverflowResult::NeverOverflows;
4122
4123   // The remaining code needs Add to be available. Return early if it is not.
4124   if (!Add)
4125     return OverflowResult::MayOverflow;
4126
4127   // If the sign of Add is the same as at least one of the operands, this add
4128   // CANNOT overflow. This is particularly useful when the sum is
4129   // @llvm.assume'ed non-negative rather than proved so from analyzing its
4130   // operands.
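  // For example (illustrative): if %a is known non-negative, the only
  // possible overflow of %a + %b is a positive overflow, which would wrap to
  // a negative result; a sum known non-negative (e.g. via @llvm.assume)
  // therefore rules that out.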
4131 bool LHSOrRHSKnownNonNegative = 4132 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()); 4133 bool LHSOrRHSKnownNegative = 4134 (LHSKnown.isNegative() || RHSKnown.isNegative()); 4135 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { 4136 KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT); 4137 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) || 4138 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) { 4139 return OverflowResult::NeverOverflows; 4140 } 4141 } 4142 4143 return OverflowResult::MayOverflow; 4144 } 4145 4146 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, 4147 const Value *RHS, 4148 const DataLayout &DL, 4149 AssumptionCache *AC, 4150 const Instruction *CxtI, 4151 const DominatorTree *DT) { 4152 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT); 4153 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) { 4154 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); 4155 4156 // If the LHS is negative and the RHS is non-negative, no unsigned wrap. 4157 if (LHSKnown.isNegative() && RHSKnown.isNonNegative()) 4158 return OverflowResult::NeverOverflows; 4159 4160 // If the LHS is non-negative and the RHS negative, we always wrap. 4161 if (LHSKnown.isNonNegative() && RHSKnown.isNegative()) 4162 return OverflowResult::AlwaysOverflows; 4163 } 4164 4165 return OverflowResult::MayOverflow; 4166 } 4167 4168 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, 4169 const Value *RHS, 4170 const DataLayout &DL, 4171 AssumptionCache *AC, 4172 const Instruction *CxtI, 4173 const DominatorTree *DT) { 4174 // If LHS and RHS each have at least two sign bits, the subtraction 4175 // cannot overflow. 4176 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && 4177 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) 4178 return OverflowResult::NeverOverflows; 4179 4180 KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT); 4181 4182 KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT); 4183 4184 // Subtraction of two 2's complement numbers having identical signs will 4185 // never overflow. 
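  // For example (illustrative, i8): with both operands negative, the result
  // lies in [-128 - (-1), -1 - (-128)] = [-127, 127], which always fits; the
  // non-negative case is symmetric.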
4186   if ((LHSKnown.isNegative() && RHSKnown.isNegative()) ||
4187       (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()))
4188     return OverflowResult::NeverOverflows;
4189
4190   // TODO: implement logic similar to checkRippleForSignedAdd
4191   return OverflowResult::MayOverflow;
4192 }
4193
4194 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
4195                                      const DominatorTree &DT) {
4196 #ifndef NDEBUG
4197   auto IID = II->getIntrinsicID();
4198   assert((IID == Intrinsic::sadd_with_overflow ||
4199           IID == Intrinsic::uadd_with_overflow ||
4200           IID == Intrinsic::ssub_with_overflow ||
4201           IID == Intrinsic::usub_with_overflow ||
4202           IID == Intrinsic::smul_with_overflow ||
4203           IID == Intrinsic::umul_with_overflow) &&
4204          "Not an overflow intrinsic!");
4205 #endif
4206
4207   SmallVector<const BranchInst *, 2> GuardingBranches;
4208   SmallVector<const ExtractValueInst *, 2> Results;
4209
4210   for (const User *U : II->users()) {
4211     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4212       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4213
4214       if (EVI->getIndices()[0] == 0)
4215         Results.push_back(EVI);
4216       else {
4217         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4218
4219         for (const auto *U : EVI->users())
4220           if (const auto *B = dyn_cast<BranchInst>(U)) {
4221             assert(B->isConditional() && "How else is it using an i1?");
4222             GuardingBranches.push_back(B);
4223           }
4224       }
4225     } else {
4226       // We are using the aggregate directly in a way we don't want to analyze
4227       // here (storing it to a global, say).
4228       return false;
4229     }
4230   }
4231
4232   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4233     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4234     if (!NoWrapEdge.isSingleEdge())
4235       return false;
4236
4237     // Check if all users of the add are provably no-wrap.
4238     for (const auto *Result : Results) {
4239       // If the extractvalue itself is not executed on overflow, then we don't
4240       // need to check each use separately, since domination is transitive.
4241       if (DT.dominates(NoWrapEdge, Result->getParent()))
4242         continue;
4243
4244       for (auto &RU : Result->uses())
4245         if (!DT.dominates(NoWrapEdge, RU))
4246           return false;
4247     }
4248
4249     return true;
4250   };
4251
4252   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4253 }
4254
4255
4256 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4257                                                  const DataLayout &DL,
4258                                                  AssumptionCache *AC,
4259                                                  const Instruction *CxtI,
4260                                                  const DominatorTree *DT) {
4261   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4262                                        Add, DL, AC, CxtI, DT);
4263 }
4264
4265 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4266                                                  const Value *RHS,
4267                                                  const DataLayout &DL,
4268                                                  AssumptionCache *AC,
4269                                                  const Instruction *CxtI,
4270                                                  const DominatorTree *DT) {
4271   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4272 }
4273
4274 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4275   // A memory operation returns normally if it isn't volatile. A volatile
4276   // operation is allowed to trap.
4277   //
4278   // An atomic operation isn't guaranteed to return in a reasonable amount of
4279   // time because it's possible for another thread to interfere with it for an
4280   // arbitrary length of time, but programs aren't allowed to rely on that.
4281   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4282     return !LI->isVolatile();
4283   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4284     return !SI->isVolatile();
4285   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4286     return !CXI->isVolatile();
4287   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4288     return !RMWI->isVolatile();
4289   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4290     return !MII->isVolatile();
4291
4292   // If there is no successor, then execution can't transfer to it.
4293   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4294     return !CRI->unwindsToCaller();
4295   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4296     return !CatchSwitch->unwindsToCaller();
4297   if (isa<ResumeInst>(I))
4298     return false;
4299   if (isa<ReturnInst>(I))
4300     return false;
4301   if (isa<UnreachableInst>(I))
4302     return false;
4303
4304   // Calls can throw, or contain an infinite loop, or kill the process.
4305   if (auto CS = ImmutableCallSite(I)) {
4306     // Call sites that throw have implicit non-local control flow.
4307     if (!CS.doesNotThrow())
4308       return false;
4309
4310     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4311     // etc. and thus not return. However, LLVM already assumes that
4312     //
4313     //  - Thread exiting actions are modeled as writes to memory invisible to
4314     //    the program.
4315     //
4316     //  - Loops that don't have side effects (side effects are volatile/atomic
4317     //    stores and IO) always terminate (see http://llvm.org/PR965).
4318     //    Furthermore IO itself is also modeled as writes to memory invisible
4319     //    to the program.
4320     //
4321     // We rely on those assumptions here, and use the memory effects of the
4322     // call target as a proxy for checking that it always returns.
4323
4324     // FIXME: This isn't aggressive enough; a call which only writes to a
4325     // global is guaranteed to return.
4326     return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
4327            match(I, m_Intrinsic<Intrinsic::assume>()) ||
4328            match(I, m_Intrinsic<Intrinsic::sideeffect>());
4329   }
4330
4331   // Other instructions return normally.
4332   return true;
4333 }
4334
4335 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4336   // TODO: This is slightly conservative for invoke instructions since exiting
4337   // via an exception *is* normal control flow for them.
4338   for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4339     if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4340       return false;
4341   return true;
4342 }
4343
4344 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4345                                                   const Loop *L) {
4346   // The loop header is guaranteed to be executed for every iteration.
4347   //
4348   // FIXME: Relax this constraint to cover all basic blocks that are
4349   // guaranteed to be executed at every iteration.
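  // For example (illustrative): in 'for (;;) { a(); if (c) b(); }' the block
  // containing a() runs on every iteration, but only the header block is
  // recognized here.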
4350 if (I->getParent() != L->getHeader()) return false; 4351 4352 for (const Instruction &LI : *L->getHeader()) { 4353 if (&LI == I) return true; 4354 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false; 4355 } 4356 llvm_unreachable("Instruction not contained in its own parent basic block."); 4357 } 4358 4359 bool llvm::propagatesFullPoison(const Instruction *I) { 4360 switch (I->getOpcode()) { 4361 case Instruction::Add: 4362 case Instruction::Sub: 4363 case Instruction::Xor: 4364 case Instruction::Trunc: 4365 case Instruction::BitCast: 4366 case Instruction::AddrSpaceCast: 4367 case Instruction::Mul: 4368 case Instruction::Shl: 4369 case Instruction::GetElementPtr: 4370 // These operations all propagate poison unconditionally. Note that poison 4371 // is not any particular value, so xor or subtraction of poison with 4372 // itself still yields poison, not zero. 4373 return true; 4374 4375 case Instruction::AShr: 4376 case Instruction::SExt: 4377 // For these operations, one bit of the input is replicated across 4378 // multiple output bits. A replicated poison bit is still poison. 4379 return true; 4380 4381 case Instruction::ICmp: 4382 // Comparing poison with any value yields poison. This is why, for 4383 // instance, x s< (x +nsw 1) can be folded to true. 4384 return true; 4385 4386 default: 4387 return false; 4388 } 4389 } 4390 4391 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) { 4392 switch (I->getOpcode()) { 4393 case Instruction::Store: 4394 return cast<StoreInst>(I)->getPointerOperand(); 4395 4396 case Instruction::Load: 4397 return cast<LoadInst>(I)->getPointerOperand(); 4398 4399 case Instruction::AtomicCmpXchg: 4400 return cast<AtomicCmpXchgInst>(I)->getPointerOperand(); 4401 4402 case Instruction::AtomicRMW: 4403 return cast<AtomicRMWInst>(I)->getPointerOperand(); 4404 4405 case Instruction::UDiv: 4406 case Instruction::SDiv: 4407 case Instruction::URem: 4408 case Instruction::SRem: 4409 return I->getOperand(1); 4410 4411 default: 4412 return nullptr; 4413 } 4414 } 4415 4416 bool llvm::mustTriggerUB(const Instruction *I, 4417 const SmallSet<const Value *, 16>& KnownPoison) { 4418 auto *NotPoison = getGuaranteedNonFullPoisonOp(I); 4419 return (NotPoison && KnownPoison.count(NotPoison)); 4420 } 4421 4422 4423 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) { 4424 // We currently only look for uses of poison values within the same basic 4425 // block, as that makes it easier to guarantee that the uses will be 4426 // executed given that PoisonI is executed. 4427 // 4428 // FIXME: Expand this to consider uses beyond the same basic block. To do 4429 // this, look out for the distinction between post-dominance and strong 4430 // post-dominance. 4431 const BasicBlock *BB = PoisonI->getParent(); 4432 4433 // Set of instructions that we have proved will yield poison if PoisonI 4434 // does. 4435 SmallSet<const Value *, 16> YieldsPoison; 4436 SmallSet<const BasicBlock *, 4> Visited; 4437 YieldsPoison.insert(PoisonI); 4438 Visited.insert(PoisonI->getParent()); 4439 4440 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end(); 4441 4442 unsigned Iter = 0; 4443 while (Iter++ < MaxDepth) { 4444 for (auto &I : make_range(Begin, End)) { 4445 if (&I != PoisonI) { 4446 if (mustTriggerUB(&I, YieldsPoison)) 4447 return true; 4448 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 4449 return false; 4450 } 4451 4452 // Mark poison that propagates from I through uses of I. 
4453       if (YieldsPoison.count(&I)) {
4454         for (const User *User : I.users()) {
4455           const Instruction *UserI = cast<Instruction>(User);
4456           if (propagatesFullPoison(UserI))
4457             YieldsPoison.insert(User);
4458         }
4459       }
4460     }
4461
4462     if (auto *NextBB = BB->getSingleSuccessor()) {
4463       if (Visited.insert(NextBB).second) {
4464         BB = NextBB;
4465         Begin = BB->getFirstNonPHI()->getIterator();
4466         End = BB->end();
4467         continue;
4468       }
4469     }
4470
4471     break;
4472   }
4473   return false;
4474 }
4475
4476 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4477   if (FMF.noNaNs())
4478     return true;
4479
4480   if (auto *C = dyn_cast<ConstantFP>(V))
4481     return !C->isNaN();
4482
4483   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4484     if (!C->getElementType()->isFloatingPointTy())
4485       return false;
4486     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4487       if (C->getElementAsAPFloat(I).isNaN())
4488         return false;
4489     }
4490     return true;
4491   }
4492
4493   return false;
4494 }
4495
4496 static bool isKnownNonZero(const Value *V) {
4497   if (auto *C = dyn_cast<ConstantFP>(V))
4498     return !C->isZero();
4499
4500   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4501     if (!C->getElementType()->isFloatingPointTy())
4502       return false;
4503     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4504       if (C->getElementAsAPFloat(I).isZero())
4505         return false;
4506     }
4507     return true;
4508   }
4509
4510   return false;
4511 }
4512
4513 /// Match a clamp pattern for float types without caring about NaNs or signed
4514 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
4515 /// function recognizes if it can be substituted with a "canonical" min/max
4516 /// pattern.
4517 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4518                                                Value *CmpLHS, Value *CmpRHS,
4519                                                Value *TrueVal, Value *FalseVal,
4520                                                Value *&LHS, Value *&RHS) {
4521   // Try to match
4522   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4523   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4524   // and return the description of the outer Max/Min.
4525
4526   // First, check if the select has inverse order:
4527   if (CmpRHS == FalseVal) {
4528     std::swap(TrueVal, FalseVal);
4529     Pred = CmpInst::getInversePredicate(Pred);
4530   }
4531
4532   // Assume success now. If there's no match, callers should not use these anyway.
4533   LHS = TrueVal;
4534   RHS = FalseVal;
4535
4536   const APFloat *FC1;
4537   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4538     return {SPF_UNKNOWN, SPNB_NA, false};
4539
4540   const APFloat *FC2;
4541   switch (Pred) {
4542   case CmpInst::FCMP_OLT:
4543   case CmpInst::FCMP_OLE:
4544   case CmpInst::FCMP_ULT:
4545   case CmpInst::FCMP_ULE:
4546     if (match(FalseVal,
4547               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4548                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4549         FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4550       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4551     break;
4552   case CmpInst::FCMP_OGT:
4553   case CmpInst::FCMP_OGE:
4554   case CmpInst::FCMP_UGT:
4555   case CmpInst::FCMP_UGE:
4556     if (match(FalseVal,
4557               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4558                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4559         FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4560       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4561     break;
4562   default:
4563     break;
4564   }
4565
4566   return {SPF_UNKNOWN, SPNB_NA, false};
4567 }
4568
4569 /// Recognize variations of:
4570 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ?
(h) : (v))) 4571 static SelectPatternResult matchClamp(CmpInst::Predicate Pred, 4572 Value *CmpLHS, Value *CmpRHS, 4573 Value *TrueVal, Value *FalseVal) { 4574 // Swap the select operands and predicate to match the patterns below. 4575 if (CmpRHS != TrueVal) { 4576 Pred = ICmpInst::getSwappedPredicate(Pred); 4577 std::swap(TrueVal, FalseVal); 4578 } 4579 const APInt *C1; 4580 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 4581 const APInt *C2; 4582 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 4583 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 4584 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 4585 return {SPF_SMAX, SPNB_NA, false}; 4586 4587 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 4588 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 4589 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 4590 return {SPF_SMIN, SPNB_NA, false}; 4591 4592 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 4593 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 4594 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 4595 return {SPF_UMAX, SPNB_NA, false}; 4596 4597 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 4598 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 4599 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 4600 return {SPF_UMIN, SPNB_NA, false}; 4601 } 4602 return {SPF_UNKNOWN, SPNB_NA, false}; 4603 } 4604 4605 /// Recognize variations of: 4606 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) 4607 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, 4608 Value *CmpLHS, Value *CmpRHS, 4609 Value *TVal, Value *FVal, 4610 unsigned Depth) { 4611 // TODO: Allow FP min/max with nnan/nsz. 4612 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); 4613 4614 Value *A, *B; 4615 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); 4616 if (!SelectPatternResult::isMinOrMax(L.Flavor)) 4617 return {SPF_UNKNOWN, SPNB_NA, false}; 4618 4619 Value *C, *D; 4620 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); 4621 if (L.Flavor != R.Flavor) 4622 return {SPF_UNKNOWN, SPNB_NA, false}; 4623 4624 // We have something like: x Pred y ? min(a, b) : min(c, d). 4625 // Try to match the compare to the min/max operations of the select operands. 4626 // First, make sure we have the right compare predicate. 
  switch (L.Flavor) {
  case SPF_SMIN:
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_SMAX:
    if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMIN:
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  case SPF_UMAX:
    if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
      Pred = ICmpInst::getSwappedPredicate(Pred);
      std::swap(CmpLHS, CmpRHS);
    }
    if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
      break;
    return {SPF_UNKNOWN, SPNB_NA, false};
  default:
    return {SPF_UNKNOWN, SPNB_NA, false};
  }

  // If there is a common operand in the already matched min/max and the other
  // min/max operands match the compare operands (either directly or inverted),
  // then this is min/max of the same flavor.

  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  if (D == B) {
    if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  if (C == B) {
    if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(A, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  if (D == A) {
    if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }
  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  if (C == A) {
    if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
                                         match(B, m_Not(m_Specific(CmpRHS)))))
      return {L.Flavor, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS,
                                       unsigned Depth) {
  // Assume success. If there's no match, callers should not use these anyway.
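  // E.g. for "(X >s Y) ? 0 : Z" recognized as SMIN(Z, 0) below, the reported
  // operands are simply the select's own TrueVal (0) and FalseVal (Z).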
  LHS = TrueVal;
  RHS = FalseVal;

  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  const APInt *C1;
  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
        C2->isMaxSignedValue())
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
        C2->isMinSignedValue())
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  // Look through 'not' ops to find disguised signed min/max.
  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
      match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
      match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  return {SPF_UNKNOWN, SPNB_NA, false};
}

bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
  assert(X && Y && "Invalid operand");

  // X = sub (0, Y) || X = sub nsw (0, Y)
  if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
      (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
    return true;

  // Y = sub (0, X) || Y = sub nsw (0, X)
  if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
      (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
    return true;

  // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
  Value *A, *B;
  return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
                       match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
         (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
                      match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
}

static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS,
                                              unsigned Depth) {
  if (CmpInst::isFPPredicate(Pred)) {
    // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has
    // one 0.0 operand, set the compare's 0.0 operands to that same value for
    // the purpose of identifying min/max. Disregard vector constants with
    // undefined elements because those cannot be back-propagated for analysis.
    Value *OutputZeroVal = nullptr;
    if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
        !cast<Constant>(TrueVal)->containsUndefElement())
      OutputZeroVal = TrueVal;
    else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
             !cast<Constant>(FalseVal)->containsUndefElement())
      OutputZeroVal = FalseVal;

    if (OutputZeroVal) {
      if (match(CmpLHS, m_AnyZeroFP()))
        CmpLHS = OutputZeroVal;
      if (match(CmpRHS, m_AnyZeroFP()))
        CmpRHS = OutputZeroVal;
    }
  }

  LHS = CmpLHS;
  RHS = CmpRHS;

  // Comparisons involving signed zero may give inconsistent results between
  // implementations.
  //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore, we behave conservatively and only proceed if at least one of
  // the operands is known to not be zero or if we don't care about signed
  // zero.
  switch (Pred) {
  default: break;
  // FIXME: Include OGT/OLT/UGT/ULT.
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //     ordered comparison fails), which could be NaN or non-NaN.
  // So here we discover exactly what NaN behavior is required/accepted.
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
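      // For example, both might be operands of an fcmp carrying the nnan
      // flag, or floating-point constants that are provably not NaN.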
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  if (isKnownNegation(TrueVal, FalseVal)) {
    // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
    // match against either LHS or sext(LHS).
    auto MaybeSExtCmpLHS =
        m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
    auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
    auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
    if (match(TrueVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value (-X >s 0),
      // swap the return values because the negated value is always 'RHS'.
      LHS = TrueVal;
      RHS = FalseVal;
      if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
      // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_ABS, SPNB_NA, false};

      // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
      // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_NABS, SPNB_NA, false};
    }
    else if (match(FalseVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value (-X >s 0),
      // swap the return values because the negated value is always 'RHS'.
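      // E.g. for "(-X >s 0) ? X : -X", the initial assignment below makes LHS
      // the negated value (-X); the swap then restores LHS == X, RHS == -X.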
      LHS = FalseVal;
      RHS = TrueVal;
      if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
      // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_NABS, SPNB_NA, false};

      // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
      // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};
    }
  }

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);

  // According to IEEE 754-2008 (5.3.1), minNum(0.0, -0.0) and similar
  // operations may return either -0.0 or 0.0, so an fcmp/select pair has
  // stricter semantics than minNum. Be conservative in such cases.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}

/// Helps to match a select pattern in case of a type mismatch.
///
/// The function handles the case where the type of the true and false values
/// of a select instruction differs from the type of the cmp instruction's
/// operands because of a cast instruction. The function checks if it is legal
/// to move the cast operation after the "select". If yes, it returns the new
/// second value of the "select" (under the assumption that the cast is moved):
/// 1. As the operand of the cast instruction when both values of the "select"
///    are the same cast instruction.
/// 2. As a restored constant (by applying the reverse cast operation) when the
///    first value of the "select" is a cast operation and the second value is
///    a constant.
/// NOTE: We return only the new second value because the first value can be
/// accessed as the operand of the cast instruction.
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
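    // E.g. (hypothetical IR) if V1 is "zext i8 %a to i32" and V2 is
    // "zext i8 %b to i32", the select can be performed on %a and %b directly,
    // with a single zext of the result afterwards.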
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move trunc after select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // the upper bits after truncation. This can't be an abs pattern, because
      // an abs would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only a min/max pattern can be matched. Such a match requires the
      // widened C to equal CmpConst; that is why we set the widened C to
      // CmpConst here, and the condition trunc(CmpConst) == C is checked
      // below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
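  // For example (a hypothetical IR sketch, for illustration only):
  //
  //   %c   = icmp slt i32 %x, 42
  //   %ext = sext i32 %x to i64
  //   %sel = select i1 %c, i64 %ext, i64 42
  //
  // Here the compare is on i32 while the select is on i64; lookThroughCast
  // recovers the i32 operands (%x and 42), so the SMIN pattern can still be
  // matched.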
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}

CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}

CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
}

/// Return true if "icmp Pred LHS RHS" is always true.
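/// For example, "icmp ule %x, (add nuw %x, C)" is always true for any
/// constant C, because a no-unsigned-wrap addition cannot decrease the value.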
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is true
/// when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {

  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
/// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
/// Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    CmpInst::Predicate BPred,
                                                    bool AreSwappedOps) {
  // Canonicalize the predicate as if the operands were not commuted.
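  // E.g. if the first compare is "icmp sgt X, Y" and the second is
  // "icmp slt Y, X", swapping the second predicate yields "icmp sgt X, Y",
  // which matches the first compare exactly.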
  if (AreSwappedOps)
    BPred = ICmpInst::getSwappedPredicate(BPred);

  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
/// Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const ConstantInt *C2) {
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);
  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
  ICmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  Value *BLHS = RHS->getOperand(0);
  Value *BRHS = RHS->getOperand(1);
  ICmpInst::Predicate BPred = RHS->getPredicate();

  // Can we infer anything when the two compares have matching operands?
  bool AreSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, BPred, AreSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
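  // For example (hypothetical values %a and %b), knowing "(%a && %b) == true"
  // lets us recurse with each of %a and %b assumed true, and knowing
  // "(%a || %b) == false" lets us recurse with each assumed false; either leg
  // may then imply (or refute) RHS.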
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication =
            isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication =
            isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (LHSCmp && RHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
  // an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO && RHSCmp) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
  }
  return None;
}

Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
                                             const Instruction *ContextI,
                                             const DataLayout &DL) {
  assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
  if (!ContextI || !ContextI->getParent())
    return None;

  // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (e.g., from a SimplifyQuery) instead?
  const BasicBlock *ContextBB = ContextI->getParent();
  const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
  if (!PredBB)
    return None;

  // We need a conditional branch in the predecessor.
  Value *PredCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
    return None;

  // The branch should get simplified. Don't bother simplifying this condition.
  if (TrueBB == FalseBB)
    return None;

  assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
         "Predecessor block does not point to successor?");

  // Is this condition implied by the predecessor condition?
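  // For example (hypothetical IR), if the predecessor ends in
  // "br i1 %p, label %ContextBB, label %other", then %p is known to be true
  // on entry to ContextBB, and we ask whether %p being true implies Cond.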
  bool CondIsTrue = TrueBB == ContextBB;
  return isImpliedCondition(PredCond, Cond, DL, CondIsTrue);
}