//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
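/// For example, a <4 x i32> value yields 32, while a pointer type (whose
/// scalar size is reported as zero) falls back to the DataLayout's index
/// size for that pointer type.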
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
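  // For example, with M = 0x0F on i8 the two sides are (X & 0xF0) and
  // (Y & 0x0F); their possible set bits lie in disjoint nibbles, so no bit
  // can be set on both sides at once.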
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
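  // E.g. for an i8 mul where one operand has at least 3 leading zeros and the
  // other at least 7, the product of values below 2^5 and 2^1 is below 2^6,
  // so max(3 + 7, 8) - 8 = 2 high bits are known zero.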
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                            Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  // -------
  //    XX11
  //   XX11
  //  XX11
  // XX11
  // -------
  // XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
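    // E.g. for an i8 range [32, 36) the unsigned min is 0b00100000 and the
    // unsigned max is 0b00100011; min ^ max = 0b00000011 has six leading
    // zeros, so the top six bits are shared by every value in the range.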
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
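      // E.g. from assume((v & 0xF0) == 0x30) we learn v's top nibble: within
      // MaskKnown.One = 0xF0, Known.One gains 0x30 and Known.Zero gains 0xC0,
      // while the low nibble of v stays unknown.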
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
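  // E.g. for BitWidth 32 the mask below is 31 (0b11111); if none of the low
  // five bits of the shift amount is known either way, every well-defined
  // shift amount is still possible, and only the nonzero check can help.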
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
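    // E.g. a bit known one in both inputs XORs to a known zero, while a bit
    // known one in exactly one input (and known zero in the other) XORs to a
    // known one.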
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    } else if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        MaxHighZeros = 1;
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has the "nsw" flag, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
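    // (The result of urem is no larger than the first operand and, for a
    // nonzero divisor, strictly smaller than the second, so whichever operand
    // has more known leading zeros bounds the result.)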
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
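          // E.g. for i = phi [0, entry], [i + 4, loop] both the start value
          // and the step have at least two trailing zeros, so every value of
          // the recurrence is a multiple of 4.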
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value references itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::fshr:
      case Intrinsic::fshl: {
        const APInt *SA;
        if (!match(I->getOperand(2), m_APInt(SA)))
          break;

        // Normalize to funnel shift left.
        uint64_t ShiftAmt = SA->urem(BitWidth);
        if (II->getIntrinsicID() == Intrinsic::fshr)
          ShiftAmt = BitWidth - ShiftAmt;

        KnownBits Known3(Known);
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);

        Known.Zero =
            Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
        Known.One =
            Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      }
    }
    break;
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              Known, Known2, Depth, Q);
          break;
        }
      }
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
          V->getType()->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");

  Type *ScalarTy = V->getType()->getScalarType();
  unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
    Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
  assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
  (void)BitWidth;
  (void)ExpectedWidth;

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known.One = *C;
    Known.Zero = ~Known.One;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
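  // For example, for <2 x i8> <i8 5, i8 13> (0b00000101 and 0b00001101),
  // Known.One becomes 0b00000101 (bits set in every element) and Known.Zero
  // becomes 0b11110010 (bits clear in every element); bit 3 differs between
  // the elements and stays unknown.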
1626 if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) { 1627 // We know that CDS must be a vector of integers. Take the intersection of 1628 // each element. 1629 Known.Zero.setAllBits(); Known.One.setAllBits(); 1630 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) { 1631 APInt Elt = CDS->getElementAsAPInt(i); 1632 Known.Zero &= ~Elt; 1633 Known.One &= Elt; 1634 } 1635 return; 1636 } 1637 1638 if (const auto *CV = dyn_cast<ConstantVector>(V)) { 1639 // We know that CV must be a vector of integers. Take the intersection of 1640 // each element. 1641 Known.Zero.setAllBits(); Known.One.setAllBits(); 1642 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { 1643 Constant *Element = CV->getAggregateElement(i); 1644 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); 1645 if (!ElementCI) { 1646 Known.resetAll(); 1647 return; 1648 } 1649 const APInt &Elt = ElementCI->getValue(); 1650 Known.Zero &= ~Elt; 1651 Known.One &= Elt; 1652 } 1653 return; 1654 } 1655 1656 // Start out not knowing anything. 1657 Known.resetAll(); 1658 1659 // We can't imply anything about undefs. 1660 if (isa<UndefValue>(V)) 1661 return; 1662 1663 // There's no point in looking through other users of ConstantData for 1664 // assumptions. Confirm that we've handled them all. 1665 assert(!isa<ConstantData>(V) && "Unhandled constant data!"); 1666 1667 // Limit search depth. 1668 // All recursive calls that increase depth must come after this. 1669 if (Depth == MaxDepth) 1670 return; 1671 1672 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has 1673 // the bits of its aliasee. 1674 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 1675 if (!GA->isInterposable()) 1676 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q); 1677 return; 1678 } 1679 1680 if (const Operator *I = dyn_cast<Operator>(V)) 1681 computeKnownBitsFromOperator(I, Known, Depth, Q); 1682 1683 // Aligned pointers have trailing zeros - refine Known.Zero set 1684 if (V->getType()->isPointerTy()) { 1685 unsigned Align = V->getPointerAlignment(Q.DL); 1686 if (Align) 1687 Known.Zero.setLowBits(countTrailingZeros(Align)); 1688 } 1689 1690 // computeKnownBitsFromAssume strictly refines Known. 1691 // Therefore, we run them after computeKnownBitsFromOperator. 1692 1693 // Check whether a nearby assume intrinsic can determine some known bits. 1694 computeKnownBitsFromAssume(V, Known, Depth, Q); 1695 1696 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 1697 } 1698 1699 /// Return true if the given value is known to have exactly one 1700 /// bit set when defined. For vectors return true if every element is known to 1701 /// be a power of two when defined. Supports values with integer or pointer 1702 /// types and vectors of integers. 1703 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, 1704 const Query &Q) { 1705 assert(Depth <= MaxDepth && "Limit Search Depth"); 1706 1707 // Attempt to match against constants. 1708 if (OrZero && match(V, m_Power2OrZero())) 1709 return true; 1710 if (match(V, m_Power2())) 1711 return true; 1712 1713 // 1 << X is clearly a power of two if the one is not shifted off the end. If 1714 // it is shifted off the end then the result is undefined. 1715 if (match(V, m_Shl(m_One(), m_Value()))) 1716 return true; 1717 1718 // (signmask) >>l X is clearly a power of two if the one is not shifted off 1719 // the bottom. If it is shifted off the bottom then the result is undefined. 
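  // For example, for i8 the sign mask is 0x80, and 0x80 lshr 3 yields 0x10,
  // still exactly one set bit; every in-range shift amount (0..7) keeps a
  // single bit set, and larger shift amounts are undefined anyway.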
1720 if (match(V, m_LShr(m_SignMask(), m_Value()))) 1721 return true; 1722 1723 // The remaining tests are all recursive, so bail out if we hit the limit. 1724 if (Depth++ == MaxDepth) 1725 return false; 1726 1727 Value *X = nullptr, *Y = nullptr; 1728 // A shift left or a logical shift right of a power of two is a power of two 1729 // or zero. 1730 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 1731 match(V, m_LShr(m_Value(X), m_Value())))) 1732 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 1733 1734 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 1735 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 1736 1737 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 1738 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 1739 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 1740 1741 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 1742 // A power of two and'd with anything is a power of two or zero. 1743 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 1744 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 1745 return true; 1746 // X & (-X) is always a power of two or zero. 1747 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 1748 return true; 1749 return false; 1750 } 1751 1752 // Adding a power-of-two or zero to the same power-of-two or zero yields 1753 // either the original power-of-two, a larger power-of-two or zero. 1754 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1755 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 1756 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) || 1757 Q.IIQ.hasNoSignedWrap(VOBO)) { 1758 if (match(X, m_And(m_Specific(Y), m_Value())) || 1759 match(X, m_And(m_Value(), m_Specific(Y)))) 1760 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 1761 return true; 1762 if (match(Y, m_And(m_Specific(X), m_Value())) || 1763 match(Y, m_And(m_Value(), m_Specific(X)))) 1764 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 1765 return true; 1766 1767 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 1768 KnownBits LHSBits(BitWidth); 1769 computeKnownBits(X, LHSBits, Depth, Q); 1770 1771 KnownBits RHSBits(BitWidth); 1772 computeKnownBits(Y, RHSBits, Depth, Q); 1773 // If i8 V is a power of two or zero: 1774 // ZeroBits: 1 1 1 0 1 1 1 1 1775 // ~ZeroBits: 0 0 0 1 0 0 0 0 1776 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) 1777 // If OrZero isn't set, we cannot give back a zero result. 1778 // Make sure either the LHS or RHS has a bit set. 1779 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) 1780 return true; 1781 } 1782 } 1783 1784 // An exact divide or right shift can only shift off zero bits, so the result 1785 // is a power of two only if the first operand is a power of two and not 1786 // copying a sign bit (sdiv int_min, 2). 1787 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 1788 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 1789 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 1790 Depth, Q); 1791 } 1792 1793 return false; 1794 } 1795 1796 /// Test whether a GEP's result is known to be non-null. 1797 /// 1798 /// Uses properties inherent in a GEP to try to determine whether it is known 1799 /// to be non-null. 1800 /// 1801 /// Currently this routine does not support vector GEPs. 
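/// For example, in address space 0 a hypothetical
///   %p = getelementptr inbounds { i32, i32 }, { i32, i32 }* %b, i64 0, i32 1
/// is known non-null even when nothing is known about %b: the non-zero field
/// offset would violate the inbounds contract if the result were null.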
1802 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, 1803 const Query &Q) { 1804 const Function *F = nullptr; 1805 if (const Instruction *I = dyn_cast<Instruction>(GEP)) 1806 F = I->getFunction(); 1807 1808 if (!GEP->isInBounds() || 1809 NullPointerIsDefined(F, GEP->getPointerAddressSpace())) 1810 return false; 1811 1812 // FIXME: Support vector-GEPs. 1813 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 1814 1815 // If the base pointer is non-null, we cannot walk to a null address with an 1816 // inbounds GEP in address space zero. 1817 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) 1818 return true; 1819 1820 // Walk the GEP operands and see if any operand introduces a non-zero offset. 1821 // If so, then the GEP cannot produce a null pointer, as doing so would 1822 // inherently violate the inbounds contract within address space zero. 1823 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 1824 GTI != GTE; ++GTI) { 1825 // Struct types are easy -- they must always be indexed by a constant. 1826 if (StructType *STy = GTI.getStructTypeOrNull()) { 1827 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); 1828 unsigned ElementIdx = OpC->getZExtValue(); 1829 const StructLayout *SL = Q.DL.getStructLayout(STy); 1830 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); 1831 if (ElementOffset > 0) 1832 return true; 1833 continue; 1834 } 1835 1836 // If we have a zero-sized type, the index doesn't matter. Keep looping. 1837 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0) 1838 continue; 1839 1840 // Fast path the constant operand case both for efficiency and so we don't 1841 // increment Depth when just zipping down an all-constant GEP. 1842 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { 1843 if (!OpC->isZero()) 1844 return true; 1845 continue; 1846 } 1847 1848 // We post-increment Depth here because while isKnownNonZero increments it 1849 // as well, when we pop back up that increment won't persist. We don't want 1850 // to recurse 10k times just because we have 10k GEP operands. We don't 1851 // bail completely out because we want to handle constant GEPs regardless 1852 // of depth. 1853 if (Depth++ >= MaxDepth) 1854 continue; 1855 1856 if (isKnownNonZero(GTI.getOperand(), Depth, Q)) 1857 return true; 1858 } 1859 1860 return false; 1861 } 1862 1863 static bool isKnownNonNullFromDominatingCondition(const Value *V, 1864 const Instruction *CtxI, 1865 const DominatorTree *DT) { 1866 assert(V->getType()->isPointerTy() && "V must be pointer type"); 1867 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull"); 1868 1869 if (!CtxI || !DT) 1870 return false; 1871 1872 unsigned NumUsesExplored = 0; 1873 for (auto *U : V->users()) { 1874 // Avoid massive lists 1875 if (NumUsesExplored >= DomConditionsMaxUses) 1876 break; 1877 NumUsesExplored++; 1878 1879 // If the value is used as an argument to a call or invoke, then argument 1880 // attributes may provide an answer about null-ness. 
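    // For example, given a hypothetical declaration
    //   declare void @f(i8* nonnull)
    // a dominating "call void @f(i8* %p)" establishes that %p is non-null at
    // CtxI, because passing null to a nonnull argument would be undefined.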
    if (auto CS = ImmutableCallSite(U))
      if (auto *CalledFunc = CS.getCalledFunction())
        for (const Argument &Arg : CalledFunc->args())
          if (CS.getArgOperand(Arg.getArgNo()) == V &&
              Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
            return true;

    // Consider only compare instructions that uniquely control a branch.
    CmpInst::Predicate Pred;
    if (!match(const_cast<User *>(U),
               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
      continue;

    SmallVector<const User *, 4> WorkList;
    SmallPtrSet<const User *, 4> Visited;
    for (auto *CmpU : U->users()) {
      assert(WorkList.empty() && "Should be!");
      if (Visited.insert(CmpU).second)
        WorkList.push_back(CmpU);

      while (!WorkList.empty()) {
        auto *Curr = WorkList.pop_back_val();

        // If a user is an AND, add all its users to the work list. We only
        // propagate the "pred != null" condition through an AND because it is
        // only correct to assume that all conditions of the AND hold on the
        // true branch.
        // TODO: Support similar logic for OR and the EQ predicate?
        if (Pred == ICmpInst::ICMP_NE)
          if (auto *BO = dyn_cast<BinaryOperator>(Curr))
            if (BO->getOpcode() == Instruction::And) {
              for (auto *BOU : BO->users())
                if (Visited.insert(BOU).second)
                  WorkList.push_back(BOU);
              continue;
            }

        if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
          assert(BI->isConditional() && "uses a comparison!");

          BasicBlock *NonNullSuccessor =
              BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
          BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
          if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
            return true;
        } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
                   DT->dominates(cast<Instruction>(Curr), CtxI)) {
          return true;
        }
      }
    }
  }

  return false;
}

/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value? The range operands
/// must have the same bit width as Value.
static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
1967 return true; 1968 1969 // For constant vectors, check that all elements are undefined or known 1970 // non-zero to determine that the whole vector is known non-zero. 1971 if (auto *VecTy = dyn_cast<VectorType>(C->getType())) { 1972 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { 1973 Constant *Elt = C->getAggregateElement(i); 1974 if (!Elt || Elt->isNullValue()) 1975 return false; 1976 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt)) 1977 return false; 1978 } 1979 return true; 1980 } 1981 1982 // A global variable in address space 0 is non null unless extern weak 1983 // or an absolute symbol reference. Other address spaces may have null as a 1984 // valid address for a global, so we can't assume anything. 1985 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { 1986 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && 1987 GV->getType()->getAddressSpace() == 0) 1988 return true; 1989 } else 1990 return false; 1991 } 1992 1993 if (auto *I = dyn_cast<Instruction>(V)) { 1994 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) { 1995 // If the possible ranges don't contain zero, then the value is 1996 // definitely non-zero. 1997 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { 1998 const APInt ZeroValue(Ty->getBitWidth(), 0); 1999 if (rangeMetadataExcludesValue(Ranges, ZeroValue)) 2000 return true; 2001 } 2002 } 2003 } 2004 2005 // Some of the tests below are recursive, so bail out if we hit the limit. 2006 if (Depth++ >= MaxDepth) 2007 return false; 2008 2009 // Check for pointer simplifications. 2010 if (V->getType()->isPointerTy()) { 2011 // Alloca never returns null, malloc might. 2012 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0) 2013 return true; 2014 2015 // A byval, inalloca, or nonnull argument is never null. 2016 if (const Argument *A = dyn_cast<Argument>(V)) 2017 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr()) 2018 return true; 2019 2020 // A Load tagged with nonnull metadata is never null. 2021 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 2022 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull)) 2023 return true; 2024 2025 if (const auto *Call = dyn_cast<CallBase>(V)) { 2026 if (Call->isReturnNonNull()) 2027 return true; 2028 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call)) 2029 return isKnownNonZero(RP, Depth, Q); 2030 } 2031 } 2032 2033 2034 // Check for recursive pointer simplifications. 2035 if (V->getType()->isPointerTy()) { 2036 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) 2037 return true; 2038 2039 // Look through bitcast operations, GEPs, and int2ptr instructions as they 2040 // do not alter the value, or at least not the nullness property of the 2041 // value, e.g., int2ptr is allowed to zero/sign extend the value. 2042 // 2043 // Note that we have to take special care to avoid looking through 2044 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well 2045 // as casts that can alter the value, e.g., AddrSpaceCasts. 
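    // For example, a hypothetical %p = inttoptr i32 %x to i8* on a target
    // with 64-bit pointers zero-extends %x, so %p is non-null exactly when
    // %x is non-zero; the source-no-wider-than-destination checks below are
    // what keep this sound, since a truncating cast could drop the only
    // non-zero bits.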
2046 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 2047 if (isGEPKnownNonNull(GEP, Depth, Q)) 2048 return true; 2049 2050 if (auto *BCO = dyn_cast<BitCastOperator>(V)) 2051 return isKnownNonZero(BCO->getOperand(0), Depth, Q); 2052 2053 if (auto *I2P = dyn_cast<IntToPtrInst>(V)) 2054 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <= 2055 Q.DL.getTypeSizeInBits(I2P->getDestTy())) 2056 return isKnownNonZero(I2P->getOperand(0), Depth, Q); 2057 } 2058 2059 // Similar to int2ptr above, we can look through ptr2int here if the cast 2060 // is a no-op or an extend and not a truncate. 2061 if (auto *P2I = dyn_cast<PtrToIntInst>(V)) 2062 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <= 2063 Q.DL.getTypeSizeInBits(P2I->getDestTy())) 2064 return isKnownNonZero(P2I->getOperand(0), Depth, Q); 2065 2066 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); 2067 2068 // X | Y != 0 if X != 0 or Y != 0. 2069 Value *X = nullptr, *Y = nullptr; 2070 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 2071 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q); 2072 2073 // ext X != 0 if X != 0. 2074 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 2075 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); 2076 2077 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 2078 // if the lowest bit is shifted off the end. 2079 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) { 2080 // shl nuw can't remove any non-zero bits. 2081 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2082 if (Q.IIQ.hasNoUnsignedWrap(BO)) 2083 return isKnownNonZero(X, Depth, Q); 2084 2085 KnownBits Known(BitWidth); 2086 computeKnownBits(X, Known, Depth, Q); 2087 if (Known.One[0]) 2088 return true; 2089 } 2090 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 2091 // defined if the sign bit is shifted off the end. 2092 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 2093 // shr exact can only shift out zero bits. 2094 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 2095 if (BO->isExact()) 2096 return isKnownNonZero(X, Depth, Q); 2097 2098 KnownBits Known = computeKnownBits(X, Depth, Q); 2099 if (Known.isNegative()) 2100 return true; 2101 2102 // If the shifter operand is a constant, and all of the bits shifted 2103 // out are known to be zero, and X is known non-zero then at least one 2104 // non-zero bit must remain. 2105 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 2106 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 2107 // Is there a known one in the portion not shifted out? 2108 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal) 2109 return true; 2110 // Are all the bits to be shifted out known zero? 2111 if (Known.countMinTrailingZeros() >= ShiftVal) 2112 return isKnownNonZero(X, Depth, Q); 2113 } 2114 } 2115 // div exact can only produce a zero if the dividend is zero. 2116 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 2117 return isKnownNonZero(X, Depth, Q); 2118 } 2119 // X + Y. 2120 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2121 KnownBits XKnown = computeKnownBits(X, Depth, Q); 2122 KnownBits YKnown = computeKnownBits(Y, Depth, Q); 2123 2124 // If X and Y are both non-negative (as signed values) then their sum is not 2125 // zero unless both X and Y are zero. 
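    // (With both sign bits clear, X + Y <= 2^BitWidth - 2, so the addition
    // cannot wrap back around to zero; the sum is zero only if X == Y == 0.)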
2126 if (XKnown.isNonNegative() && YKnown.isNonNegative()) 2127 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q)) 2128 return true; 2129 2130 // If X and Y are both negative (as signed values) then their sum is not 2131 // zero unless both X and Y equal INT_MIN. 2132 if (XKnown.isNegative() && YKnown.isNegative()) { 2133 APInt Mask = APInt::getSignedMaxValue(BitWidth); 2134 // The sign bit of X is set. If some other bit is set then X is not equal 2135 // to INT_MIN. 2136 if (XKnown.One.intersects(Mask)) 2137 return true; 2138 // The sign bit of Y is set. If some other bit is set then Y is not equal 2139 // to INT_MIN. 2140 if (YKnown.One.intersects(Mask)) 2141 return true; 2142 } 2143 2144 // The sum of a non-negative number and a power of two is not zero. 2145 if (XKnown.isNonNegative() && 2146 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) 2147 return true; 2148 if (YKnown.isNonNegative() && 2149 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) 2150 return true; 2151 } 2152 // X * Y. 2153 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 2154 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2155 // If X and Y are non-zero then so is X * Y as long as the multiplication 2156 // does not overflow. 2157 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) && 2158 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q)) 2159 return true; 2160 } 2161 // (C ? X : Y) != 0 if X != 0 and Y != 0. 2162 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 2163 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) && 2164 isKnownNonZero(SI->getFalseValue(), Depth, Q)) 2165 return true; 2166 } 2167 // PHI 2168 else if (const PHINode *PN = dyn_cast<PHINode>(V)) { 2169 // Try and detect a recurrence that monotonically increases from a 2170 // starting value, as these are common as induction variables. 2171 if (PN->getNumIncomingValues() == 2) { 2172 Value *Start = PN->getIncomingValue(0); 2173 Value *Induction = PN->getIncomingValue(1); 2174 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start)) 2175 std::swap(Start, Induction); 2176 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) { 2177 if (!C->isZero() && !C->isNegative()) { 2178 ConstantInt *X; 2179 if (Q.IIQ.UseInstrInfo && 2180 (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) || 2181 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) && 2182 !X->isNegative()) 2183 return true; 2184 } 2185 } 2186 } 2187 // Check if all incoming values are non-zero constant. 2188 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) { 2189 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero(); 2190 }); 2191 if (AllNonZeroConstants) 2192 return true; 2193 } 2194 2195 KnownBits Known(BitWidth); 2196 computeKnownBits(V, Known, Depth, Q); 2197 return Known.One != 0; 2198 } 2199 2200 /// Return true if V2 == V1 + X, where X is known non-zero. 2201 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { 2202 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 2203 if (!BO || BO->getOpcode() != Instruction::Add) 2204 return false; 2205 Value *Op = nullptr; 2206 if (V2 == BO->getOperand(0)) 2207 Op = BO->getOperand(1); 2208 else if (V2 == BO->getOperand(1)) 2209 Op = BO->getOperand(0); 2210 else 2211 return false; 2212 return isKnownNonZero(Op, 0, Q); 2213 } 2214 2215 /// Return true if it is known that V1 != V2. 
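/// For example, given a hypothetical %y = add i32 %x, 5, this can conclude
/// %y != %x: the wrapped sum can only equal %x if the addend 5 were zero
/// modulo 2^32.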
2216 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) { 2217 if (V1 == V2) 2218 return false; 2219 if (V1->getType() != V2->getType()) 2220 // We can't look through casts yet. 2221 return false; 2222 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q)) 2223 return true; 2224 2225 if (V1->getType()->isIntOrIntVectorTy()) { 2226 // Are any known bits in V1 contradictory to known bits in V2? If V1 2227 // has a known zero where V2 has a known one, they must not be equal. 2228 KnownBits Known1 = computeKnownBits(V1, 0, Q); 2229 KnownBits Known2 = computeKnownBits(V2, 0, Q); 2230 2231 if (Known1.Zero.intersects(Known2.One) || 2232 Known2.Zero.intersects(Known1.One)) 2233 return true; 2234 } 2235 return false; 2236 } 2237 2238 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 2239 /// simplify operations downstream. Mask is known to be zero for bits that V 2240 /// cannot have. 2241 /// 2242 /// This function is defined on values with integer type, values with pointer 2243 /// type, and vectors of integers. In the case 2244 /// where V is a vector, the mask, known zero, and known one values are the 2245 /// same width as the vector element, and the bit is set only if it is true 2246 /// for all of the elements in the vector. 2247 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, 2248 const Query &Q) { 2249 KnownBits Known(Mask.getBitWidth()); 2250 computeKnownBits(V, Known, Depth, Q); 2251 return Mask.isSubsetOf(Known.Zero); 2252 } 2253 2254 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow). 2255 // Returns the input and lower/upper bounds. 2256 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, 2257 const APInt *&CLow, const APInt *&CHigh) { 2258 assert(isa<Operator>(Select) && 2259 cast<Operator>(Select)->getOpcode() == Instruction::Select && 2260 "Input should be a Select!"); 2261 2262 const Value *LHS, *RHS, *LHS2, *RHS2; 2263 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor; 2264 if (SPF != SPF_SMAX && SPF != SPF_SMIN) 2265 return false; 2266 2267 if (!match(RHS, m_APInt(CLow))) 2268 return false; 2269 2270 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor; 2271 if (getInverseMinMaxFlavor(SPF) != SPF2) 2272 return false; 2273 2274 if (!match(RHS2, m_APInt(CHigh))) 2275 return false; 2276 2277 if (SPF == SPF_SMIN) 2278 std::swap(CLow, CHigh); 2279 2280 In = LHS2; 2281 return CLow->sle(*CHigh); 2282 } 2283 2284 /// For vector constants, loop over the elements and find the constant with the 2285 /// minimum number of sign bits. Return 0 if the value is not a vector constant 2286 /// or if any element was not analyzed; otherwise, return the count for the 2287 /// element with the minimum number of sign bits. 2288 static unsigned computeNumSignBitsVectorConstant(const Value *V, 2289 unsigned TyBits) { 2290 const auto *CV = dyn_cast<Constant>(V); 2291 if (!CV || !CV->getType()->isVectorTy()) 2292 return 0; 2293 2294 unsigned MinSignBits = TyBits; 2295 unsigned NumElts = CV->getType()->getVectorNumElements(); 2296 for (unsigned i = 0; i != NumElts; ++i) { 2297 // If we find a non-ConstantInt, bail out. 
2298 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); 2299 if (!Elt) 2300 return 0; 2301 2302 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); 2303 } 2304 2305 return MinSignBits; 2306 } 2307 2308 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, 2309 const Query &Q); 2310 2311 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth, 2312 const Query &Q) { 2313 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q); 2314 assert(Result > 0 && "At least one sign bit needs to be present!"); 2315 return Result; 2316 } 2317 2318 /// Return the number of times the sign bit of the register is replicated into 2319 /// the other bits. We know that at least 1 bit is always equal to the sign bit 2320 /// (itself), but other cases can give us information. For example, immediately 2321 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each 2322 /// other, so we return 3. For vectors, return the number of sign bits for the 2323 /// vector element with the minimum number of known sign bits. 2324 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, 2325 const Query &Q) { 2326 assert(Depth <= MaxDepth && "Limit Search Depth"); 2327 2328 // We return the minimum number of sign bits that are guaranteed to be present 2329 // in V, so for undef we have to conservatively return 1. We don't have the 2330 // same behavior for poison though -- that's a FIXME today. 2331 2332 Type *ScalarTy = V->getType()->getScalarType(); 2333 unsigned TyBits = ScalarTy->isPointerTy() ? 2334 Q.DL.getIndexTypeSizeInBits(ScalarTy) : 2335 Q.DL.getTypeSizeInBits(ScalarTy); 2336 2337 unsigned Tmp, Tmp2; 2338 unsigned FirstAnswer = 1; 2339 2340 // Note that ConstantInt is handled by the general computeKnownBits case 2341 // below. 2342 2343 if (Depth == MaxDepth) 2344 return 1; // Limit search depth. 2345 2346 const Operator *U = dyn_cast<Operator>(V); 2347 switch (Operator::getOpcode(V)) { 2348 default: break; 2349 case Instruction::SExt: 2350 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); 2351 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp; 2352 2353 case Instruction::SDiv: { 2354 const APInt *Denominator; 2355 // sdiv X, C -> adds log(C) sign bits. 2356 if (match(U->getOperand(1), m_APInt(Denominator))) { 2357 2358 // Ignore non-positive denominator. 2359 if (!Denominator->isStrictlyPositive()) 2360 break; 2361 2362 // Calculate the incoming numerator bits. 2363 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2364 2365 // Add floor(log(C)) bits to the numerator bits. 2366 return std::min(TyBits, NumBits + Denominator->logBase2()); 2367 } 2368 break; 2369 } 2370 2371 case Instruction::SRem: { 2372 const APInt *Denominator; 2373 // srem X, C -> we know that the result is within [-C+1,C) when C is a 2374 // positive constant. This let us put a lower bound on the number of sign 2375 // bits. 2376 if (match(U->getOperand(1), m_APInt(Denominator))) { 2377 2378 // Ignore non-positive denominator. 2379 if (!Denominator->isStrictlyPositive()) 2380 break; 2381 2382 // Calculate the incoming numerator bits. SRem by a positive constant 2383 // can't lower the number of sign bits. 2384 unsigned NumrBits = 2385 ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2386 2387 // Calculate the leading sign bit constraints by examining the 2388 // denominator. Given that the denominator is positive, there are two 2389 // cases: 2390 // 2391 // 1. the numerator is positive. 
The result range is [0,C) and [0,C) u< 2392 // (1 << ceilLogBase2(C)). 2393 // 2394 // 2. the numerator is negative. Then the result range is (-C,0] and 2395 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). 2396 // 2397 // Thus a lower bound on the number of sign bits is `TyBits - 2398 // ceilLogBase2(C)`. 2399 2400 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); 2401 return std::max(NumrBits, ResBits); 2402 } 2403 break; 2404 } 2405 2406 case Instruction::AShr: { 2407 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2408 // ashr X, C -> adds C sign bits. Vectors too. 2409 const APInt *ShAmt; 2410 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2411 if (ShAmt->uge(TyBits)) 2412 break; // Bad shift. 2413 unsigned ShAmtLimited = ShAmt->getZExtValue(); 2414 Tmp += ShAmtLimited; 2415 if (Tmp > TyBits) Tmp = TyBits; 2416 } 2417 return Tmp; 2418 } 2419 case Instruction::Shl: { 2420 const APInt *ShAmt; 2421 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2422 // shl destroys sign bits. 2423 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2424 if (ShAmt->uge(TyBits) || // Bad shift. 2425 ShAmt->uge(Tmp)) break; // Shifted all sign bits out. 2426 Tmp2 = ShAmt->getZExtValue(); 2427 return Tmp - Tmp2; 2428 } 2429 break; 2430 } 2431 case Instruction::And: 2432 case Instruction::Or: 2433 case Instruction::Xor: // NOT is handled here. 2434 // Logical binary ops preserve the number of sign bits at the worst. 2435 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2436 if (Tmp != 1) { 2437 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2438 FirstAnswer = std::min(Tmp, Tmp2); 2439 // We computed what we know about the sign bits as our first 2440 // answer. Now proceed to the generic code that uses 2441 // computeKnownBits, and pick whichever answer is better. 2442 } 2443 break; 2444 2445 case Instruction::Select: { 2446 // If we have a clamp pattern, we know that the number of sign bits will be 2447 // the minimum of the clamp min/max range. 2448 const Value *X; 2449 const APInt *CLow, *CHigh; 2450 if (isSignedMinMaxClamp(U, X, CLow, CHigh)) 2451 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); 2452 2453 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2454 if (Tmp == 1) break; 2455 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 2456 return std::min(Tmp, Tmp2); 2457 } 2458 2459 case Instruction::Add: 2460 // Add can have at most one carry bit. Thus we know that the output 2461 // is, at worst, one more bit than the inputs. 2462 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2463 if (Tmp == 1) break; 2464 2465 // Special case decrementing a value (ADD X, -1): 2466 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 2467 if (CRHS->isAllOnesValue()) { 2468 KnownBits Known(TyBits); 2469 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 2470 2471 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2472 // sign bits set. 2473 if ((Known.Zero | 1).isAllOnesValue()) 2474 return TyBits; 2475 2476 // If we are subtracting one from a positive number, there is no carry 2477 // out of the result. 2478 if (Known.isNonNegative()) 2479 return Tmp; 2480 } 2481 2482 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2483 if (Tmp2 == 1) break; 2484 return std::min(Tmp, Tmp2)-1; 2485 2486 case Instruction::Sub: 2487 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2488 if (Tmp2 == 1) break; 2489 2490 // Handle NEG. 
2491 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2492 if (CLHS->isNullValue()) { 2493 KnownBits Known(TyBits); 2494 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 2495 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2496 // sign bits set. 2497 if ((Known.Zero | 1).isAllOnesValue()) 2498 return TyBits; 2499 2500 // If the input is known to be positive (the sign bit is known clear), 2501 // the output of the NEG has the same number of sign bits as the input. 2502 if (Known.isNonNegative()) 2503 return Tmp2; 2504 2505 // Otherwise, we treat this like a SUB. 2506 } 2507 2508 // Sub can have at most one carry bit. Thus we know that the output 2509 // is, at worst, one more bit than the inputs. 2510 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2511 if (Tmp == 1) break; 2512 return std::min(Tmp, Tmp2)-1; 2513 2514 case Instruction::Mul: { 2515 // The output of the Mul can be at most twice the valid bits in the inputs. 2516 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2517 if (SignBitsOp0 == 1) break; 2518 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2519 if (SignBitsOp1 == 1) break; 2520 unsigned OutValidBits = 2521 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); 2522 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; 2523 } 2524 2525 case Instruction::PHI: { 2526 const PHINode *PN = cast<PHINode>(U); 2527 unsigned NumIncomingValues = PN->getNumIncomingValues(); 2528 // Don't analyze large in-degree PHIs. 2529 if (NumIncomingValues > 4) break; 2530 // Unreachable blocks may have zero-operand PHI nodes. 2531 if (NumIncomingValues == 0) break; 2532 2533 // Take the minimum of all incoming values. This can't infinitely loop 2534 // because of our depth threshold. 2535 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q); 2536 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { 2537 if (Tmp == 1) return Tmp; 2538 Tmp = std::min( 2539 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q)); 2540 } 2541 return Tmp; 2542 } 2543 2544 case Instruction::Trunc: 2545 // FIXME: it's tricky to do anything useful for this, but it is an important 2546 // case for targets like X86. 2547 break; 2548 2549 case Instruction::ExtractElement: 2550 // Look through extract element. At the moment we keep this simple and skip 2551 // tracking the specific element. But at least we might find information 2552 // valid for all elements of the vector (for example if vector is sign 2553 // extended, shifted, etc). 2554 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2555 2556 case Instruction::ShuffleVector: { 2557 // TODO: This is copied almost directly from the SelectionDAG version of 2558 // ComputeNumSignBits. It would be better if we could share common 2559 // code. If not, make sure that changes are translated to the DAG. 2560 2561 // Collect the minimum number of sign bits that are shared by every vector 2562 // element referenced by the shuffle. 
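    // For example, given a hypothetical
    //   %s = shufflevector <2 x i16> %a, <2 x i16> %b, <2 x i32> <i32 0, i32 3>
    // the mask references element 0 of %a and element 1 of %b, so the result
    // is the smaller of the two operands' sign-bit counts; an operand none of
    // whose elements are referenced would be ignored entirely.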
2563 auto *Shuf = cast<ShuffleVectorInst>(U); 2564 int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements(); 2565 int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements(); 2566 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2567 for (int i = 0; i != NumMaskElts; ++i) { 2568 int M = Shuf->getMaskValue(i); 2569 assert(M < NumElts * 2 && "Invalid shuffle mask constant"); 2570 // For undef elements, we don't know anything about the common state of 2571 // the shuffle result. 2572 if (M == -1) 2573 return 1; 2574 if (M < NumElts) 2575 DemandedLHS.setBit(M % NumElts); 2576 else 2577 DemandedRHS.setBit(M % NumElts); 2578 } 2579 Tmp = std::numeric_limits<unsigned>::max(); 2580 if (!!DemandedLHS) 2581 Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q); 2582 if (!!DemandedRHS) { 2583 Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q); 2584 Tmp = std::min(Tmp, Tmp2); 2585 } 2586 // If we don't know anything, early out and try computeKnownBits fall-back. 2587 if (Tmp == 1) 2588 break; 2589 assert(Tmp <= V->getType()->getScalarSizeInBits() && 2590 "Failed to determine minimum sign bits"); 2591 return Tmp; 2592 } 2593 } 2594 2595 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2596 // use this information. 2597 2598 // If we can examine all elements of a vector constant successfully, we're 2599 // done (we can't do any better than that). If not, keep trying. 2600 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits)) 2601 return VecSignBits; 2602 2603 KnownBits Known(TyBits); 2604 computeKnownBits(V, Known, Depth, Q); 2605 2606 // If we know that the sign bit is either zero or one, determine the number of 2607 // identical bits in the top of the input value. 2608 return std::max(FirstAnswer, Known.countMinSignBits()); 2609 } 2610 2611 /// This function computes the integer multiple of Base that equals V. 2612 /// If successful, it returns true and returns the multiple in 2613 /// Multiple. If unsuccessful, it returns false. It looks 2614 /// through SExt instructions only if LookThroughSExt is true. 2615 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 2616 bool LookThroughSExt, unsigned Depth) { 2617 const unsigned MaxDepth = 6; 2618 2619 assert(V && "No Value?"); 2620 assert(Depth <= MaxDepth && "Limit Search Depth"); 2621 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); 2622 2623 Type *T = V->getType(); 2624 2625 ConstantInt *CI = dyn_cast<ConstantInt>(V); 2626 2627 if (Base == 0) 2628 return false; 2629 2630 if (Base == 1) { 2631 Multiple = V; 2632 return true; 2633 } 2634 2635 ConstantExpr *CO = dyn_cast<ConstantExpr>(V); 2636 Constant *BaseVal = ConstantInt::get(T, Base); 2637 if (CO && CO == BaseVal) { 2638 // Multiple is 1. 2639 Multiple = ConstantInt::get(T, 1); 2640 return true; 2641 } 2642 2643 if (CI && CI->getZExtValue() % Base == 0) { 2644 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 2645 return true; 2646 } 2647 2648 if (Depth == MaxDepth) return false; // Limit search depth. 
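  // For example (illustrative), for a hypothetical %v = shl i32 %x, 3 the
  // Shl case below rewrites the shift as %x * 8, so ComputeMultiple(%v, 8,
  // Multiple) can succeed with Multiple == %x.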
2649 2650 Operator *I = dyn_cast<Operator>(V); 2651 if (!I) return false; 2652 2653 switch (I->getOpcode()) { 2654 default: break; 2655 case Instruction::SExt: 2656 if (!LookThroughSExt) return false; 2657 // otherwise fall through to ZExt 2658 LLVM_FALLTHROUGH; 2659 case Instruction::ZExt: 2660 return ComputeMultiple(I->getOperand(0), Base, Multiple, 2661 LookThroughSExt, Depth+1); 2662 case Instruction::Shl: 2663 case Instruction::Mul: { 2664 Value *Op0 = I->getOperand(0); 2665 Value *Op1 = I->getOperand(1); 2666 2667 if (I->getOpcode() == Instruction::Shl) { 2668 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 2669 if (!Op1CI) return false; 2670 // Turn Op0 << Op1 into Op0 * 2^Op1 2671 APInt Op1Int = Op1CI->getValue(); 2672 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 2673 APInt API(Op1Int.getBitWidth(), 0); 2674 API.setBit(BitToSet); 2675 Op1 = ConstantInt::get(V->getContext(), API); 2676 } 2677 2678 Value *Mul0 = nullptr; 2679 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 2680 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 2681 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 2682 if (Op1C->getType()->getPrimitiveSizeInBits() < 2683 MulC->getType()->getPrimitiveSizeInBits()) 2684 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 2685 if (Op1C->getType()->getPrimitiveSizeInBits() > 2686 MulC->getType()->getPrimitiveSizeInBits()) 2687 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 2688 2689 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 2690 Multiple = ConstantExpr::getMul(MulC, Op1C); 2691 return true; 2692 } 2693 2694 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 2695 if (Mul0CI->getValue() == 1) { 2696 // V == Base * Op1, so return Op1 2697 Multiple = Op1; 2698 return true; 2699 } 2700 } 2701 2702 Value *Mul1 = nullptr; 2703 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 2704 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 2705 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 2706 if (Op0C->getType()->getPrimitiveSizeInBits() < 2707 MulC->getType()->getPrimitiveSizeInBits()) 2708 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 2709 if (Op0C->getType()->getPrimitiveSizeInBits() > 2710 MulC->getType()->getPrimitiveSizeInBits()) 2711 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 2712 2713 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 2714 Multiple = ConstantExpr::getMul(MulC, Op0C); 2715 return true; 2716 } 2717 2718 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) 2719 if (Mul1CI->getValue() == 1) { 2720 // V == Base * Op0, so return Op0 2721 Multiple = Op0; 2722 return true; 2723 } 2724 } 2725 } 2726 } 2727 2728 // We could not determine if V is a multiple of Base. 2729 return false; 2730 } 2731 2732 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS, 2733 const TargetLibraryInfo *TLI) { 2734 const Function *F = ICS.getCalledFunction(); 2735 if (!F) 2736 return Intrinsic::not_intrinsic; 2737 2738 if (F->isIntrinsic()) 2739 return F->getIntrinsicID(); 2740 2741 if (!TLI) 2742 return Intrinsic::not_intrinsic; 2743 2744 LibFunc Func; 2745 // We're going to make assumptions on the semantics of the functions, check 2746 // that the target knows that it's available in this environment and it does 2747 // not have local linkage. 
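  // For example, a call to sinf is mapped to Intrinsic::sin below, but only
  // when TLI reports that sinf is available in this environment, the call
  // only reads memory, and the function has not been redefined with local
  // linkage.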
2748 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func)) 2749 return Intrinsic::not_intrinsic; 2750 2751 if (!ICS.onlyReadsMemory()) 2752 return Intrinsic::not_intrinsic; 2753 2754 // Otherwise check if we have a call to a function that can be turned into a 2755 // vector intrinsic. 2756 switch (Func) { 2757 default: 2758 break; 2759 case LibFunc_sin: 2760 case LibFunc_sinf: 2761 case LibFunc_sinl: 2762 return Intrinsic::sin; 2763 case LibFunc_cos: 2764 case LibFunc_cosf: 2765 case LibFunc_cosl: 2766 return Intrinsic::cos; 2767 case LibFunc_exp: 2768 case LibFunc_expf: 2769 case LibFunc_expl: 2770 return Intrinsic::exp; 2771 case LibFunc_exp2: 2772 case LibFunc_exp2f: 2773 case LibFunc_exp2l: 2774 return Intrinsic::exp2; 2775 case LibFunc_log: 2776 case LibFunc_logf: 2777 case LibFunc_logl: 2778 return Intrinsic::log; 2779 case LibFunc_log10: 2780 case LibFunc_log10f: 2781 case LibFunc_log10l: 2782 return Intrinsic::log10; 2783 case LibFunc_log2: 2784 case LibFunc_log2f: 2785 case LibFunc_log2l: 2786 return Intrinsic::log2; 2787 case LibFunc_fabs: 2788 case LibFunc_fabsf: 2789 case LibFunc_fabsl: 2790 return Intrinsic::fabs; 2791 case LibFunc_fmin: 2792 case LibFunc_fminf: 2793 case LibFunc_fminl: 2794 return Intrinsic::minnum; 2795 case LibFunc_fmax: 2796 case LibFunc_fmaxf: 2797 case LibFunc_fmaxl: 2798 return Intrinsic::maxnum; 2799 case LibFunc_copysign: 2800 case LibFunc_copysignf: 2801 case LibFunc_copysignl: 2802 return Intrinsic::copysign; 2803 case LibFunc_floor: 2804 case LibFunc_floorf: 2805 case LibFunc_floorl: 2806 return Intrinsic::floor; 2807 case LibFunc_ceil: 2808 case LibFunc_ceilf: 2809 case LibFunc_ceill: 2810 return Intrinsic::ceil; 2811 case LibFunc_trunc: 2812 case LibFunc_truncf: 2813 case LibFunc_truncl: 2814 return Intrinsic::trunc; 2815 case LibFunc_rint: 2816 case LibFunc_rintf: 2817 case LibFunc_rintl: 2818 return Intrinsic::rint; 2819 case LibFunc_nearbyint: 2820 case LibFunc_nearbyintf: 2821 case LibFunc_nearbyintl: 2822 return Intrinsic::nearbyint; 2823 case LibFunc_round: 2824 case LibFunc_roundf: 2825 case LibFunc_roundl: 2826 return Intrinsic::round; 2827 case LibFunc_pow: 2828 case LibFunc_powf: 2829 case LibFunc_powl: 2830 return Intrinsic::pow; 2831 case LibFunc_sqrt: 2832 case LibFunc_sqrtf: 2833 case LibFunc_sqrtl: 2834 return Intrinsic::sqrt; 2835 } 2836 2837 return Intrinsic::not_intrinsic; 2838 } 2839 2840 /// Return true if we can prove that the specified FP value is never equal to 2841 /// -0.0. 2842 /// 2843 /// NOTE: this function will need to be revisited when we support non-default 2844 /// rounding modes! 2845 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, 2846 unsigned Depth) { 2847 if (auto *CFP = dyn_cast<ConstantFP>(V)) 2848 return !CFP->getValueAPF().isNegZero(); 2849 2850 // Limit search depth. 2851 if (Depth == MaxDepth) 2852 return false; 2853 2854 auto *Op = dyn_cast<Operator>(V); 2855 if (!Op) 2856 return false; 2857 2858 // Check if the nsz fast-math flag is set. 2859 if (auto *FPO = dyn_cast<FPMathOperator>(Op)) 2860 if (FPO->hasNoSignedZeros()) 2861 return true; 2862 2863 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. 2864 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP()))) 2865 return true; 2866 2867 // sitofp and uitofp turn into +0.0 for zero. 
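  // (An integer-to-FP conversion can produce a negative result, but never
  // -0.0: the only integer that converts to zero is 0 itself, and it always
  // converts to +0.0.)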
2868 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) 2869 return true; 2870 2871 if (auto *Call = dyn_cast<CallInst>(Op)) { 2872 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI); 2873 switch (IID) { 2874 default: 2875 break; 2876 // sqrt(-0.0) = -0.0, no other negative results are possible. 2877 case Intrinsic::sqrt: 2878 case Intrinsic::canonicalize: 2879 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); 2880 // fabs(x) != -0.0 2881 case Intrinsic::fabs: 2882 return true; 2883 } 2884 } 2885 2886 return false; 2887 } 2888 2889 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a 2890 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign 2891 /// bit despite comparing equal. 2892 static bool cannotBeOrderedLessThanZeroImpl(const Value *V, 2893 const TargetLibraryInfo *TLI, 2894 bool SignBitOnly, 2895 unsigned Depth) { 2896 // TODO: This function does not do the right thing when SignBitOnly is true 2897 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform 2898 // which flips the sign bits of NaNs. See 2899 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2900 2901 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 2902 return !CFP->getValueAPF().isNegative() || 2903 (!SignBitOnly && CFP->getValueAPF().isZero()); 2904 } 2905 2906 // Handle vector of constants. 2907 if (auto *CV = dyn_cast<Constant>(V)) { 2908 if (CV->getType()->isVectorTy()) { 2909 unsigned NumElts = CV->getType()->getVectorNumElements(); 2910 for (unsigned i = 0; i != NumElts; ++i) { 2911 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i)); 2912 if (!CFP) 2913 return false; 2914 if (CFP->getValueAPF().isNegative() && 2915 (SignBitOnly || !CFP->getValueAPF().isZero())) 2916 return false; 2917 } 2918 2919 // All non-negative ConstantFPs. 2920 return true; 2921 } 2922 } 2923 2924 if (Depth == MaxDepth) 2925 return false; // Limit search depth. 2926 2927 const Operator *I = dyn_cast<Operator>(V); 2928 if (!I) 2929 return false; 2930 2931 switch (I->getOpcode()) { 2932 default: 2933 break; 2934 // Unsigned integers are always nonnegative. 2935 case Instruction::UIToFP: 2936 return true; 2937 case Instruction::FMul: 2938 // x*x is always non-negative or a NaN. 2939 if (I->getOperand(0) == I->getOperand(1) && 2940 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 2941 return true; 2942 2943 LLVM_FALLTHROUGH; 2944 case Instruction::FAdd: 2945 case Instruction::FDiv: 2946 case Instruction::FRem: 2947 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2948 Depth + 1) && 2949 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2950 Depth + 1); 2951 case Instruction::Select: 2952 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2953 Depth + 1) && 2954 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2955 Depth + 1); 2956 case Instruction::FPExt: 2957 case Instruction::FPTrunc: 2958 // Widening/narrowing never change sign. 2959 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2960 Depth + 1); 2961 case Instruction::ExtractElement: 2962 // Look through extract element. At the moment we keep this simple and skip 2963 // tracking the specific element. But at least we might find information 2964 // valid for all elements of the vector. 
2965 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2966 Depth + 1); 2967 case Instruction::Call: 2968 const auto *CI = cast<CallInst>(I); 2969 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI); 2970 switch (IID) { 2971 default: 2972 break; 2973 case Intrinsic::maxnum: 2974 return (isKnownNeverNaN(I->getOperand(0), TLI) && 2975 cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, 2976 SignBitOnly, Depth + 1)) || 2977 (isKnownNeverNaN(I->getOperand(1), TLI) && 2978 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, 2979 SignBitOnly, Depth + 1)); 2980 2981 case Intrinsic::maximum: 2982 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2983 Depth + 1) || 2984 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2985 Depth + 1); 2986 case Intrinsic::minnum: 2987 case Intrinsic::minimum: 2988 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2989 Depth + 1) && 2990 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2991 Depth + 1); 2992 case Intrinsic::exp: 2993 case Intrinsic::exp2: 2994 case Intrinsic::fabs: 2995 return true; 2996 2997 case Intrinsic::sqrt: 2998 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 2999 if (!SignBitOnly) 3000 return true; 3001 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 3002 CannotBeNegativeZero(CI->getOperand(0), TLI)); 3003 3004 case Intrinsic::powi: 3005 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 3006 // powi(x,n) is non-negative if n is even. 3007 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 3008 return true; 3009 } 3010 // TODO: This is not correct. Given that exp is an integer, here are the 3011 // ways that pow can return a negative value: 3012 // 3013 // pow(x, exp) --> negative if exp is odd and x is negative. 3014 // pow(-0, exp) --> -inf if exp is negative odd. 3015 // pow(-0, exp) --> -0 if exp is positive odd. 3016 // pow(-inf, exp) --> -0 if exp is negative odd. 3017 // pow(-inf, exp) --> -inf if exp is positive odd. 3018 // 3019 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 3020 // but we must return false if x == -0. Unfortunately we do not currently 3021 // have a way of expressing this constraint. See details in 3022 // https://llvm.org/bugs/show_bug.cgi?id=31702. 3023 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3024 Depth + 1); 3025 3026 case Intrinsic::fma: 3027 case Intrinsic::fmuladd: 3028 // x*x+y is non-negative if y is non-negative. 3029 return I->getOperand(0) == I->getOperand(1) && 3030 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && 3031 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 3032 Depth + 1); 3033 } 3034 break; 3035 } 3036 return false; 3037 } 3038 3039 bool llvm::CannotBeOrderedLessThanZero(const Value *V, 3040 const TargetLibraryInfo *TLI) { 3041 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); 3042 } 3043 3044 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { 3045 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); 3046 } 3047 3048 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, 3049 unsigned Depth) { 3050 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type"); 3051 3052 // If we're told that NaNs won't happen, assume they won't. 
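  // For example, a hypothetical %r = fadd nnan float %a, %b may be treated
  // as never being NaN: the nnan flag makes a NaN result undefined, so it is
  // legal to optimize under the no-NaN assumption even when nothing is known
  // about %a and %b.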
3053 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) 3054 if (FPMathOp->hasNoNaNs()) 3055 return true; 3056 3057 // Handle scalar constants. 3058 if (auto *CFP = dyn_cast<ConstantFP>(V)) 3059 return !CFP->isNaN(); 3060 3061 if (Depth == MaxDepth) 3062 return false; 3063 3064 if (auto *Inst = dyn_cast<Instruction>(V)) { 3065 switch (Inst->getOpcode()) { 3066 case Instruction::FAdd: 3067 case Instruction::FMul: 3068 case Instruction::FSub: 3069 case Instruction::FDiv: 3070 case Instruction::FRem: { 3071 // TODO: Need isKnownNeverInfinity 3072 return false; 3073 } 3074 case Instruction::Select: { 3075 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && 3076 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1); 3077 } 3078 case Instruction::SIToFP: 3079 case Instruction::UIToFP: 3080 return true; 3081 case Instruction::FPTrunc: 3082 case Instruction::FPExt: 3083 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1); 3084 default: 3085 break; 3086 } 3087 } 3088 3089 if (const auto *II = dyn_cast<IntrinsicInst>(V)) { 3090 switch (II->getIntrinsicID()) { 3091 case Intrinsic::canonicalize: 3092 case Intrinsic::fabs: 3093 case Intrinsic::copysign: 3094 case Intrinsic::exp: 3095 case Intrinsic::exp2: 3096 case Intrinsic::floor: 3097 case Intrinsic::ceil: 3098 case Intrinsic::trunc: 3099 case Intrinsic::rint: 3100 case Intrinsic::nearbyint: 3101 case Intrinsic::round: 3102 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1); 3103 case Intrinsic::sqrt: 3104 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) && 3105 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI); 3106 default: 3107 return false; 3108 } 3109 } 3110 3111 // Bail out for constant expressions, but try to handle vector constants. 3112 if (!V->getType()->isVectorTy() || !isa<Constant>(V)) 3113 return false; 3114 3115 // For vectors, verify that each element is not NaN. 3116 unsigned NumElts = V->getType()->getVectorNumElements(); 3117 for (unsigned i = 0; i != NumElts; ++i) { 3118 Constant *Elt = cast<Constant>(V)->getAggregateElement(i); 3119 if (!Elt) 3120 return false; 3121 if (isa<UndefValue>(Elt)) 3122 continue; 3123 auto *CElt = dyn_cast<ConstantFP>(Elt); 3124 if (!CElt || CElt->isNaN()) 3125 return false; 3126 } 3127 // All elements were confirmed not-NaN or undefined. 3128 return true; 3129 } 3130 3131 Value *llvm::isBytewiseValue(Value *V) { 3132 3133 // All byte-wide stores are splatable, even of arbitrary variables. 3134 if (V->getType()->isIntegerTy(8)) 3135 return V; 3136 3137 LLVMContext &Ctx = V->getContext(); 3138 3139 // Undef don't care. 3140 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx)); 3141 if (isa<UndefValue>(V)) 3142 return UndefInt8; 3143 3144 Constant *C = dyn_cast<Constant>(V); 3145 if (!C) { 3146 // Conceptually, we could handle things like: 3147 // %a = zext i8 %X to i16 3148 // %b = shl i16 %a, 8 3149 // %c = or i16 %a, %b 3150 // but until there is an example that actually needs this, it doesn't seem 3151 // worth worrying about. 3152 return nullptr; 3153 } 3154 3155 // Handle 'null' ConstantArrayZero etc. 3156 if (C->isNullValue()) 3157 return Constant::getNullValue(Type::getInt8Ty(Ctx)); 3158 3159 // Constant floating-point values can be handled as integer values if the 3160 // corresponding integer value is "byteable". An important case is 0.0. 
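  // For example, 1.0f bitcasts to i32 0x3F800000, whose four bytes differ,
  // so it is rejected below; +0.0 never reaches this point because it is
  // already caught by the isNullValue() check above.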
3161 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) { 3162 Type *Ty = nullptr; 3163 if (CFP->getType()->isHalfTy()) 3164 Ty = Type::getInt16Ty(Ctx); 3165 else if (CFP->getType()->isFloatTy()) 3166 Ty = Type::getInt32Ty(Ctx); 3167 else if (CFP->getType()->isDoubleTy()) 3168 Ty = Type::getInt64Ty(Ctx); 3169 // Don't handle long double formats, which have strange constraints. 3170 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty)) : nullptr; 3171 } 3172 3173 // We can handle constant integers that are multiple of 8 bits. 3174 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) { 3175 if (CI->getBitWidth() % 8 == 0) { 3176 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); 3177 if (!CI->getValue().isSplat(8)) 3178 return nullptr; 3179 return ConstantInt::get(Ctx, CI->getValue().trunc(8)); 3180 } 3181 } 3182 3183 auto Merge = [&](Value *LHS, Value *RHS) -> Value * { 3184 if (LHS == RHS) 3185 return LHS; 3186 if (!LHS || !RHS) 3187 return nullptr; 3188 if (LHS == UndefInt8) 3189 return RHS; 3190 if (RHS == UndefInt8) 3191 return LHS; 3192 return nullptr; 3193 }; 3194 3195 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) { 3196 Value *Val = UndefInt8; 3197 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I) 3198 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I))))) 3199 return nullptr; 3200 return Val; 3201 } 3202 3203 if (isa<ConstantVector>(C)) { 3204 Constant *Splat = cast<ConstantVector>(C)->getSplatValue(); 3205 return Splat ? isBytewiseValue(Splat) : nullptr; 3206 } 3207 3208 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) { 3209 Value *Val = UndefInt8; 3210 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) 3211 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I))))) 3212 return nullptr; 3213 return Val; 3214 } 3215 3216 // Don't try to handle the handful of other constants. 3217 return nullptr; 3218 } 3219 3220 // This is the recursive version of BuildSubAggregate. It takes a few different 3221 // arguments. Idxs is the index within the nested struct From that we are 3222 // looking at now (which is of type IndexedType). IdxSkip is the number of 3223 // indices from Idxs that should be left out when inserting into the resulting 3224 // struct. To is the result struct built so far, new insertvalue instructions 3225 // build on that. 3226 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType, 3227 SmallVectorImpl<unsigned> &Idxs, 3228 unsigned IdxSkip, 3229 Instruction *InsertBefore) { 3230 StructType *STy = dyn_cast<StructType>(IndexedType); 3231 if (STy) { 3232 // Save the original To argument so we can modify it 3233 Value *OrigTo = To; 3234 // General case, the type indexed by Idxs is a struct 3235 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 3236 // Process each struct element recursively 3237 Idxs.push_back(i); 3238 Value *PrevTo = To; 3239 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, 3240 InsertBefore); 3241 Idxs.pop_back(); 3242 if (!To) { 3243 // Couldn't find any inserted value for this index? 
Cleanup 3244 while (PrevTo != OrigTo) { 3245 InsertValueInst* Del = cast<InsertValueInst>(PrevTo); 3246 PrevTo = Del->getAggregateOperand(); 3247 Del->eraseFromParent(); 3248 } 3249 // Stop processing elements 3250 break; 3251 } 3252 } 3253 // If we successfully found a value for each of our subaggregates 3254 if (To) 3255 return To; 3256 } 3257 // Base case, the type indexed by SourceIdxs is not a struct, or not all of 3258 // the struct's elements had a value that was inserted directly. In the latter 3259 // case, perhaps we can't determine each of the subelements individually, but 3260 // we might be able to find the complete struct somewhere. 3261 3262 // Find the value that is at that particular spot 3263 Value *V = FindInsertedValue(From, Idxs); 3264 3265 if (!V) 3266 return nullptr; 3267 3268 // Insert the value in the new (sub) aggregate 3269 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip), 3270 "tmp", InsertBefore); 3271 } 3272 3273 // This helper takes a nested struct and extracts a part of it (which is again a 3274 // struct) into a new value. For example, given the struct: 3275 // { a, { b, { c, d }, e } } 3276 // and the indices "1, 1" this returns 3277 // { c, d }. 3278 // 3279 // It does this by inserting an insertvalue for each element in the resulting 3280 // struct, as opposed to just inserting a single struct. This will only work if 3281 // each of the elements of the substruct are known (ie, inserted into From by an 3282 // insertvalue instruction somewhere). 3283 // 3284 // All inserted insertvalue instructions are inserted before InsertBefore 3285 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range, 3286 Instruction *InsertBefore) { 3287 assert(InsertBefore && "Must have someplace to insert!"); 3288 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), 3289 idx_range); 3290 Value *To = UndefValue::get(IndexedType); 3291 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end()); 3292 unsigned IdxSkip = Idxs.size(); 3293 3294 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore); 3295 } 3296 3297 /// Given an aggregate and a sequence of indices, see if the scalar value 3298 /// indexed is already around as a register, for example if it was inserted 3299 /// directly into the aggregate. 3300 /// 3301 /// If InsertBefore is not null, this function will duplicate (modified) 3302 /// insertvalues when a part of a nested struct is extracted. 3303 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, 3304 Instruction *InsertBefore) { 3305 // Nothing to index? Just return V then (this is useful at the end of our 3306 // recursion). 3307 if (idx_range.empty()) 3308 return V; 3309 // We have indices, so V should have an indexable type. 
3310 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && 3311 "Not looking at a struct or array?"); 3312 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && 3313 "Invalid indices for type?"); 3314 3315 if (Constant *C = dyn_cast<Constant>(V)) { 3316 C = C->getAggregateElement(idx_range[0]); 3317 if (!C) return nullptr; 3318 return FindInsertedValue(C, idx_range.slice(1), InsertBefore); 3319 } 3320 3321 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { 3322 // Loop the indices for the insertvalue instruction in parallel with the 3323 // requested indices 3324 const unsigned *req_idx = idx_range.begin(); 3325 for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); 3326 i != e; ++i, ++req_idx) { 3327 if (req_idx == idx_range.end()) { 3328 // We can't handle this without inserting insertvalues 3329 if (!InsertBefore) 3330 return nullptr; 3331 3332 // The requested index identifies a part of a nested aggregate. Handle 3333 // this specially. For example, 3334 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0 3335 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1 3336 // %C = extractvalue {i32, { i32, i32 } } %B, 1 3337 // This can be changed into 3338 // %A = insertvalue {i32, i32 } undef, i32 10, 0 3339 // %C = insertvalue {i32, i32 } %A, i32 11, 1 3340 // which allows the unused 0,0 element from the nested struct to be 3341 // removed. 3342 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx), 3343 InsertBefore); 3344 } 3345 3346 // This insert value inserts something else than what we are looking for. 3347 // See if the (aggregate) value inserted into has the value we are 3348 // looking for, then. 3349 if (*req_idx != *i) 3350 return FindInsertedValue(I->getAggregateOperand(), idx_range, 3351 InsertBefore); 3352 } 3353 // If we end up here, the indices of the insertvalue match with those 3354 // requested (though possibly only partially). Now we recursively look at 3355 // the inserted value, passing any remaining indices. 3356 return FindInsertedValue(I->getInsertedValueOperand(), 3357 makeArrayRef(req_idx, idx_range.end()), 3358 InsertBefore); 3359 } 3360 3361 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { 3362 // If we're extracting a value from an aggregate that was extracted from 3363 // something else, we can extract from that something else directly instead. 3364 // However, we will need to chain I's indices with the requested indices. 3365 3366 // Calculate the number of indices required 3367 unsigned size = I->getNumIndices() + idx_range.size(); 3368 // Allocate some space to put the new indices in 3369 SmallVector<unsigned, 5> Idxs; 3370 Idxs.reserve(size); 3371 // Add indices from the extract value instruction 3372 Idxs.append(I->idx_begin(), I->idx_end()); 3373 3374 // Add requested indices 3375 Idxs.append(idx_range.begin(), idx_range.end()); 3376 3377 assert(Idxs.size() == size 3378 && "Number of indices added not correct?"); 3379 3380 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); 3381 } 3382 // Otherwise, we don't know (such as, extracting from a function return value 3383 // or load instruction) 3384 return nullptr; 3385 } 3386 3387 /// Analyze the specified pointer to see if it can be expressed as a base 3388 /// pointer plus a constant offset. Return the base and offset to the caller. 
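/// For example (an illustrative sketch), given
///   %p = getelementptr inbounds i8, i8* %base, i64 12
/// this returns %base and sets Offset to 12.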
3389 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3390                                               const DataLayout &DL) {
3391   unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3392   APInt ByteOffset(BitWidth, 0);
3393 
3394   // We walk up the defs but use a visited set to handle unreachable code. In
3395   // that case, we stop after accumulating the cycle once (not that it
3396   // matters).
3397   SmallPtrSet<Value *, 16> Visited;
3398   while (Visited.insert(Ptr).second) {
3399     if (Ptr->getType()->isVectorTy())
3400       break;
3401 
3402     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3403       // If one of the values we have visited is an addrspacecast, then
3404       // the pointer type of this GEP may be different from the type
3405       // of the Ptr parameter which was passed to this function. This
3406       // means when we construct GEPOffset, we need to use the size
3407       // of GEP's pointer type rather than the size of the original
3408       // pointer type.
3409       APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3410       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3411         break;
3412 
3413       APInt OrigByteOffset(ByteOffset);
3414       ByteOffset += GEPOffset.sextOrTrunc(ByteOffset.getBitWidth());
3415       if (ByteOffset.getMinSignedBits() > 64) {
3416         // Stop traversal if the pointer offset wouldn't fit into int64_t
3417         // (this should be removed if Offset is updated to an APInt)
3418         ByteOffset = OrigByteOffset;
3419         break;
3420       }
3421 
3422       Ptr = GEP->getPointerOperand();
3423     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3424                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3425       Ptr = cast<Operator>(Ptr)->getOperand(0);
3426     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3427       if (GA->isInterposable())
3428         break;
3429       Ptr = GA->getAliasee();
3430     } else {
3431       break;
3432     }
3433   }
3434   Offset = ByteOffset.getSExtValue();
3435   return Ptr;
3436 }
3437 
3438 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3439                                       unsigned CharSize) {
3440   // Make sure the GEP has exactly three arguments.
3441   if (GEP->getNumOperands() != 3)
3442     return false;
3443 
3444   // Make sure the indexed type is an array of integers that are \p CharSize
3445   // bits wide.
3446   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3447   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3448     return false;
3449 
3450   // Check to make sure that the first operand of the GEP is an integer and
3451   // has value 0 so that we are sure we're indexing into the initializer.
3452   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3453   if (!FirstIdx || !FirstIdx->isZero())
3454     return false;
3455 
3456   return true;
3457 }
3458 
3459 bool llvm::getConstantDataArrayInfo(const Value *V,
3460                                     ConstantDataArraySlice &Slice,
3461                                     unsigned ElementSize, uint64_t Offset) {
3462   assert(V);
3463 
3464   // Look through bitcast instructions and geps.
3465   V = V->stripPointerCasts();
3466 
3467   // If the value is a GEP instruction or constant expression, treat it as an
3468   // offset.
3469   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3470     // The GEP operator should be based on a pointer to a string constant, and
3471     // is indexing into the string constant.
3472     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3473       return false;
3474 
3475     // If the second index isn't a ConstantInt, then this is a variable index
3476     // into the array. If this occurs, we can't say anything meaningful about
3477     // the string.
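    // For instance (illustrative), in
    //   getelementptr [8 x i8], [8 x i8]* @g, i64 0, i64 %n
    // the trailing index %n is not constant, so no fixed slice can be formed.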
3478 uint64_t StartIdx = 0; 3479 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) 3480 StartIdx = CI->getZExtValue(); 3481 else 3482 return false; 3483 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize, 3484 StartIdx + Offset); 3485 } 3486 3487 // The GEP instruction, constant or instruction, must reference a global 3488 // variable that is a constant and is initialized. The referenced constant 3489 // initializer is the array that we'll use for optimization. 3490 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); 3491 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) 3492 return false; 3493 3494 const ConstantDataArray *Array; 3495 ArrayType *ArrayTy; 3496 if (GV->getInitializer()->isNullValue()) { 3497 Type *GVTy = GV->getValueType(); 3498 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) { 3499 // A zeroinitializer for the array; there is no ConstantDataArray. 3500 Array = nullptr; 3501 } else { 3502 const DataLayout &DL = GV->getParent()->getDataLayout(); 3503 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy); 3504 uint64_t Length = SizeInBytes / (ElementSize / 8); 3505 if (Length <= Offset) 3506 return false; 3507 3508 Slice.Array = nullptr; 3509 Slice.Offset = 0; 3510 Slice.Length = Length - Offset; 3511 return true; 3512 } 3513 } else { 3514 // This must be a ConstantDataArray. 3515 Array = dyn_cast<ConstantDataArray>(GV->getInitializer()); 3516 if (!Array) 3517 return false; 3518 ArrayTy = Array->getType(); 3519 } 3520 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize)) 3521 return false; 3522 3523 uint64_t NumElts = ArrayTy->getArrayNumElements(); 3524 if (Offset > NumElts) 3525 return false; 3526 3527 Slice.Array = Array; 3528 Slice.Offset = Offset; 3529 Slice.Length = NumElts - Offset; 3530 return true; 3531 } 3532 3533 /// This function computes the length of a null-terminated C string pointed to 3534 /// by V. If successful, it returns true and returns the string in Str. 3535 /// If unsuccessful, it returns false. 3536 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 3537 uint64_t Offset, bool TrimAtNul) { 3538 ConstantDataArraySlice Slice; 3539 if (!getConstantDataArrayInfo(V, Slice, 8, Offset)) 3540 return false; 3541 3542 if (Slice.Array == nullptr) { 3543 if (TrimAtNul) { 3544 Str = StringRef(); 3545 return true; 3546 } 3547 if (Slice.Length == 1) { 3548 Str = StringRef("", 1); 3549 return true; 3550 } 3551 // We cannot instantiate a StringRef as we do not have an appropriate string 3552 // of 0s at hand. 3553 return false; 3554 } 3555 3556 // Start out with the entire array in the StringRef. 3557 Str = Slice.Array->getAsString(); 3558 // Skip over 'offset' bytes. 3559 Str = Str.substr(Slice.Offset); 3560 3561 if (TrimAtNul) { 3562 // Trim off the \0 and anything after it. If the array is not nul 3563 // terminated, we just return the whole end of string. The client may know 3564 // some other way that the string is length-bound. 3565 Str = Str.substr(0, Str.find('\0')); 3566 } 3567 return true; 3568 } 3569 3570 // These next two are very similar to the above, but also look through PHI 3571 // nodes. 3572 // TODO: See if we can integrate these two together. 3573 3574 /// If we can compute the length of the string pointed to by 3575 /// the specified pointer, return 'len+1'. If we can't, return 0. 3576 static uint64_t GetStringLengthH(const Value *V, 3577 SmallPtrSetImpl<const PHINode*> &PHIs, 3578 unsigned CharSize) { 3579 // Look through noop bitcast instructions. 
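  // (Illustrative: a cast chain such as
  //    %q = bitcast [4 x i8]* @s to i8*
  //  is skipped here, so the walk below sees @s directly.)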
3580   V = V->stripPointerCasts();
3581 
3582   // If this is a PHI node, there are two cases: either we have already seen it
3583   // or we haven't.
3584   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3585     if (!PHIs.insert(PN).second)
3586       return ~0ULL; // already in the set.
3587 
3588     // If it was new, see if all the input strings are the same length.
3589     uint64_t LenSoFar = ~0ULL;
3590     for (Value *IncValue : PN->incoming_values()) {
3591       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3592       if (Len == 0) return 0; // Unknown length -> unknown.
3593 
3594       if (Len == ~0ULL) continue;
3595 
3596       if (Len != LenSoFar && LenSoFar != ~0ULL)
3597         return 0; // Disagree -> unknown.
3598       LenSoFar = Len;
3599     }
3600 
3601     // Success, all agree.
3602     return LenSoFar;
3603   }
3604 
3605   // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3606   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3607     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3608     if (Len1 == 0) return 0;
3609     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3610     if (Len2 == 0) return 0;
3611     if (Len1 == ~0ULL) return Len2;
3612     if (Len2 == ~0ULL) return Len1;
3613     if (Len1 != Len2) return 0;
3614     return Len1;
3615   }
3616 
3617   // Otherwise, see if we can read the string.
3618   ConstantDataArraySlice Slice;
3619   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3620     return 0;
3621 
3622   if (Slice.Array == nullptr)
3623     return 1;
3624 
3625   // Search for nul characters
3626   unsigned NullIndex = 0;
3627   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3628     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3629       break;
3630   }
3631 
3632   return NullIndex + 1;
3633 }
3634 
3635 /// If we can compute the length of the string pointed to by
3636 /// the specified pointer, return 'len+1'. If we can't, return 0.
3637 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3638   if (!V->getType()->isPointerTy())
3639     return 0;
3640 
3641   SmallPtrSet<const PHINode*, 32> PHIs;
3642   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3643   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
3644   // return 1, the length of an empty string.
3645   return Len == ~0ULL ? 1 : Len;
3646 }
3647 
3648 const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
3649   assert(Call &&
3650          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3651   if (const Value *RV = Call->getReturnedArgOperand())
3652     return RV;
3653   // This can be used only as an aliasing property.
3654   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
3655     return Call->getArgOperand(0);
3656   return nullptr;
3657 }
3658 
3659 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3660     const CallBase *Call) {
3661   return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3662          Call->getIntrinsicID() == Intrinsic::strip_invariant_group;
3663 }
3664 
3665 /// \p PN defines a loop-variant pointer to an object. Check if the
3666 /// previous iteration of the loop was referring to the same object as \p PN.
3667 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3668                                          const LoopInfo *LI) {
3669   // Find the loop-defined value.
3670   Loop *L = LI->getLoopFor(PN->getParent());
3671   if (PN->getNumIncomingValues() != 2)
3672     return true;
3673 
3674   // Find the value from the previous iteration.
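  // In a canonical header PHI (an illustrative sketch):
  //   %p = phi i8* [ %p.init, %preheader ], [ %p.next, %latch ]
  // the loop-carried operand is the one defined inside the loop.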
3675 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); 3676 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3677 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); 3678 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) 3679 return true; 3680 3681 // If a new pointer is loaded in the loop, the pointer references a different 3682 // object in every iteration. E.g.: 3683 // for (i) 3684 // int *p = a[i]; 3685 // ... 3686 if (auto *Load = dyn_cast<LoadInst>(PrevValue)) 3687 if (!L->isLoopInvariant(Load->getPointerOperand())) 3688 return false; 3689 return true; 3690 } 3691 3692 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL, 3693 unsigned MaxLookup) { 3694 if (!V->getType()->isPointerTy()) 3695 return V; 3696 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { 3697 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 3698 V = GEP->getPointerOperand(); 3699 } else if (Operator::getOpcode(V) == Instruction::BitCast || 3700 Operator::getOpcode(V) == Instruction::AddrSpaceCast) { 3701 V = cast<Operator>(V)->getOperand(0); 3702 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 3703 if (GA->isInterposable()) 3704 return V; 3705 V = GA->getAliasee(); 3706 } else if (isa<AllocaInst>(V)) { 3707 // An alloca can't be further simplified. 3708 return V; 3709 } else { 3710 if (auto *Call = dyn_cast<CallBase>(V)) { 3711 // CaptureTracking can know about special capturing properties of some 3712 // intrinsics like launder.invariant.group, that can't be expressed with 3713 // the attributes, but have properties like returning aliasing pointer. 3714 // Because some analysis may assume that nocaptured pointer is not 3715 // returned from some special intrinsic (because function would have to 3716 // be marked with returns attribute), it is crucial to use this function 3717 // because it should be in sync with CaptureTracking. Not using it may 3718 // cause weird miscompilations where 2 aliasing pointers are assumed to 3719 // noalias. 3720 if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) { 3721 V = RP; 3722 continue; 3723 } 3724 } 3725 3726 // See if InstructionSimplify knows any relevant tricks. 3727 if (Instruction *I = dyn_cast<Instruction>(V)) 3728 // TODO: Acquire a DominatorTree and AssumptionCache and use them. 3729 if (Value *Simplified = SimplifyInstruction(I, {DL, I})) { 3730 V = Simplified; 3731 continue; 3732 } 3733 3734 return V; 3735 } 3736 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 3737 } 3738 return V; 3739 } 3740 3741 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects, 3742 const DataLayout &DL, LoopInfo *LI, 3743 unsigned MaxLookup) { 3744 SmallPtrSet<Value *, 4> Visited; 3745 SmallVector<Value *, 4> Worklist; 3746 Worklist.push_back(V); 3747 do { 3748 Value *P = Worklist.pop_back_val(); 3749 P = GetUnderlyingObject(P, DL, MaxLookup); 3750 3751 if (!Visited.insert(P).second) 3752 continue; 3753 3754 if (SelectInst *SI = dyn_cast<SelectInst>(P)) { 3755 Worklist.push_back(SI->getTrueValue()); 3756 Worklist.push_back(SI->getFalseValue()); 3757 continue; 3758 } 3759 3760 if (PHINode *PN = dyn_cast<PHINode>(P)) { 3761 // If this PHI changes the underlying object in every iteration of the 3762 // loop, don't look through it. 
Consider: 3763 // int **A; 3764 // for (i) { 3765 // Prev = Curr; // Prev = PHI (Prev_0, Curr) 3766 // Curr = A[i]; 3767 // *Prev, *Curr; 3768 // 3769 // Prev is tracking Curr one iteration behind so they refer to different 3770 // underlying objects. 3771 if (!LI || !LI->isLoopHeader(PN->getParent()) || 3772 isSameUnderlyingObjectInLoop(PN, LI)) 3773 for (Value *IncValue : PN->incoming_values()) 3774 Worklist.push_back(IncValue); 3775 continue; 3776 } 3777 3778 Objects.push_back(P); 3779 } while (!Worklist.empty()); 3780 } 3781 3782 /// This is the function that does the work of looking through basic 3783 /// ptrtoint+arithmetic+inttoptr sequences. 3784 static const Value *getUnderlyingObjectFromInt(const Value *V) { 3785 do { 3786 if (const Operator *U = dyn_cast<Operator>(V)) { 3787 // If we find a ptrtoint, we can transfer control back to the 3788 // regular getUnderlyingObjectFromInt. 3789 if (U->getOpcode() == Instruction::PtrToInt) 3790 return U->getOperand(0); 3791 // If we find an add of a constant, a multiplied value, or a phi, it's 3792 // likely that the other operand will lead us to the base 3793 // object. We don't have to worry about the case where the 3794 // object address is somehow being computed by the multiply, 3795 // because our callers only care when the result is an 3796 // identifiable object. 3797 if (U->getOpcode() != Instruction::Add || 3798 (!isa<ConstantInt>(U->getOperand(1)) && 3799 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && 3800 !isa<PHINode>(U->getOperand(1)))) 3801 return V; 3802 V = U->getOperand(0); 3803 } else { 3804 return V; 3805 } 3806 assert(V->getType()->isIntegerTy() && "Unexpected operand type!"); 3807 } while (true); 3808 } 3809 3810 /// This is a wrapper around GetUnderlyingObjects and adds support for basic 3811 /// ptrtoint+arithmetic+inttoptr sequences. 3812 /// It returns false if unidentified object is found in GetUnderlyingObjects. 3813 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V, 3814 SmallVectorImpl<Value *> &Objects, 3815 const DataLayout &DL) { 3816 SmallPtrSet<const Value *, 16> Visited; 3817 SmallVector<const Value *, 4> Working(1, V); 3818 do { 3819 V = Working.pop_back_val(); 3820 3821 SmallVector<Value *, 4> Objs; 3822 GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL); 3823 3824 for (Value *V : Objs) { 3825 if (!Visited.insert(V).second) 3826 continue; 3827 if (Operator::getOpcode(V) == Instruction::IntToPtr) { 3828 const Value *O = 3829 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0)); 3830 if (O->getType()->isPointerTy()) { 3831 Working.push_back(O); 3832 continue; 3833 } 3834 } 3835 // If GetUnderlyingObjects fails to find an identifiable object, 3836 // getUnderlyingObjectsForCodeGen also fails for safety. 3837 if (!isIdentifiedObject(V)) { 3838 Objects.clear(); 3839 return false; 3840 } 3841 Objects.push_back(const_cast<Value *>(V)); 3842 } 3843 } while (!Working.empty()); 3844 return true; 3845 } 3846 3847 /// Return true if the only users of this pointer are lifetime markers. 
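/// For example (illustrative IR), this returns true for %a in:
///   %a = alloca i8, i64 16
///   call void @llvm.lifetime.start.p0i8(i64 16, i8* %a)
///   call void @llvm.lifetime.end.p0i8(i64 16, i8* %a)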
3848 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 3849 for (const User *U : V->users()) { 3850 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3851 if (!II) return false; 3852 3853 if (!II->isLifetimeStartOrEnd()) 3854 return false; 3855 } 3856 return true; 3857 } 3858 3859 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 3860 const Instruction *CtxI, 3861 const DominatorTree *DT) { 3862 const Operator *Inst = dyn_cast<Operator>(V); 3863 if (!Inst) 3864 return false; 3865 3866 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 3867 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 3868 if (C->canTrap()) 3869 return false; 3870 3871 switch (Inst->getOpcode()) { 3872 default: 3873 return true; 3874 case Instruction::UDiv: 3875 case Instruction::URem: { 3876 // x / y is undefined if y == 0. 3877 const APInt *V; 3878 if (match(Inst->getOperand(1), m_APInt(V))) 3879 return *V != 0; 3880 return false; 3881 } 3882 case Instruction::SDiv: 3883 case Instruction::SRem: { 3884 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 3885 const APInt *Numerator, *Denominator; 3886 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 3887 return false; 3888 // We cannot hoist this division if the denominator is 0. 3889 if (*Denominator == 0) 3890 return false; 3891 // It's safe to hoist if the denominator is not 0 or -1. 3892 if (*Denominator != -1) 3893 return true; 3894 // At this point we know that the denominator is -1. It is safe to hoist as 3895 // long we know that the numerator is not INT_MIN. 3896 if (match(Inst->getOperand(0), m_APInt(Numerator))) 3897 return !Numerator->isMinSignedValue(); 3898 // The numerator *might* be MinSignedValue. 3899 return false; 3900 } 3901 case Instruction::Load: { 3902 const LoadInst *LI = cast<LoadInst>(Inst); 3903 if (!LI->isUnordered() || 3904 // Speculative load may create a race that did not exist in the source. 3905 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) || 3906 // Speculative load may load data from dirty regions. 3907 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) || 3908 LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress)) 3909 return false; 3910 const DataLayout &DL = LI->getModule()->getDataLayout(); 3911 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(), 3912 LI->getAlignment(), DL, CtxI, DT); 3913 } 3914 case Instruction::Call: { 3915 auto *CI = cast<const CallInst>(Inst); 3916 const Function *Callee = CI->getCalledFunction(); 3917 3918 // The called function could have undefined behavior or side-effects, even 3919 // if marked readnone nounwind. 
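    // Only the 'speculatable' attribute makes that promise; for example
    // (illustrative), many math intrinsics carry it, along the lines of:
    //   declare float @llvm.sqrt.f32(float) speculatable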
3920 return Callee && Callee->isSpeculatable(); 3921 } 3922 case Instruction::VAArg: 3923 case Instruction::Alloca: 3924 case Instruction::Invoke: 3925 case Instruction::PHI: 3926 case Instruction::Store: 3927 case Instruction::Ret: 3928 case Instruction::Br: 3929 case Instruction::IndirectBr: 3930 case Instruction::Switch: 3931 case Instruction::Unreachable: 3932 case Instruction::Fence: 3933 case Instruction::AtomicRMW: 3934 case Instruction::AtomicCmpXchg: 3935 case Instruction::LandingPad: 3936 case Instruction::Resume: 3937 case Instruction::CatchSwitch: 3938 case Instruction::CatchPad: 3939 case Instruction::CatchRet: 3940 case Instruction::CleanupPad: 3941 case Instruction::CleanupRet: 3942 return false; // Misc instructions which have effects 3943 } 3944 } 3945 3946 bool llvm::mayBeMemoryDependent(const Instruction &I) { 3947 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 3948 } 3949 3950 OverflowResult llvm::computeOverflowForUnsignedMul( 3951 const Value *LHS, const Value *RHS, const DataLayout &DL, 3952 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 3953 bool UseInstrInfo) { 3954 // Multiplying n * m significant bits yields a result of n + m significant 3955 // bits. If the total number of significant bits does not exceed the 3956 // result bit width (minus 1), there is no overflow. 3957 // This means if we have enough leading zero bits in the operands 3958 // we can guarantee that the result does not overflow. 3959 // Ref: "Hacker's Delight" by Henry Warren 3960 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3961 KnownBits LHSKnown(BitWidth); 3962 KnownBits RHSKnown(BitWidth); 3963 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr, 3964 UseInstrInfo); 3965 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr, 3966 UseInstrInfo); 3967 // Note that underestimating the number of zero bits gives a more 3968 // conservative answer. 3969 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() + 3970 RHSKnown.countMinLeadingZeros(); 3971 // First handle the easy case: if we have enough zero bits there's 3972 // definitely no overflow. 3973 if (ZeroBits >= BitWidth) 3974 return OverflowResult::NeverOverflows; 3975 3976 // Get the largest possible values for each operand. 3977 APInt LHSMax = ~LHSKnown.Zero; 3978 APInt RHSMax = ~RHSKnown.Zero; 3979 3980 // We know the multiply operation doesn't overflow if the maximum values for 3981 // each operand will not overflow after we multiply them together. 3982 bool MaxOverflow; 3983 (void)LHSMax.umul_ov(RHSMax, MaxOverflow); 3984 if (!MaxOverflow) 3985 return OverflowResult::NeverOverflows; 3986 3987 // We know it always overflows if multiplying the smallest possible values for 3988 // the operands also results in overflow. 3989 bool MinOverflow; 3990 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow); 3991 if (MinOverflow) 3992 return OverflowResult::AlwaysOverflows; 3993 3994 return OverflowResult::MayOverflow; 3995 } 3996 3997 OverflowResult 3998 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS, 3999 const DataLayout &DL, AssumptionCache *AC, 4000 const Instruction *CxtI, 4001 const DominatorTree *DT, bool UseInstrInfo) { 4002 // Multiplying n * m significant bits yields a result of n + m significant 4003 // bits. If the total number of significant bits does not exceed the 4004 // result bit width (minus 1), there is no overflow. 
4005 // This means if we have enough leading sign bits in the operands 4006 // we can guarantee that the result does not overflow. 4007 // Ref: "Hacker's Delight" by Henry Warren 4008 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 4009 4010 // Note that underestimating the number of sign bits gives a more 4011 // conservative answer. 4012 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) + 4013 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT); 4014 4015 // First handle the easy case: if we have enough sign bits there's 4016 // definitely no overflow. 4017 if (SignBits > BitWidth + 1) 4018 return OverflowResult::NeverOverflows; 4019 4020 // There are two ambiguous cases where there can be no overflow: 4021 // SignBits == BitWidth + 1 and 4022 // SignBits == BitWidth 4023 // The second case is difficult to check, therefore we only handle the 4024 // first case. 4025 if (SignBits == BitWidth + 1) { 4026 // It overflows only when both arguments are negative and the true 4027 // product is exactly the minimum negative number. 4028 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 4029 // For simplicity we just check if at least one side is not negative. 4030 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, 4031 nullptr, UseInstrInfo); 4032 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, 4033 nullptr, UseInstrInfo); 4034 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) 4035 return OverflowResult::NeverOverflows; 4036 } 4037 return OverflowResult::MayOverflow; 4038 } 4039 4040 OverflowResult llvm::computeOverflowForUnsignedAdd( 4041 const Value *LHS, const Value *RHS, const DataLayout &DL, 4042 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 4043 bool UseInstrInfo) { 4044 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, 4045 nullptr, UseInstrInfo); 4046 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) { 4047 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, 4048 nullptr, UseInstrInfo); 4049 4050 if (LHSKnown.isNegative() && RHSKnown.isNegative()) { 4051 // The sign bit is set in both cases: this MUST overflow. 4052 return OverflowResult::AlwaysOverflows; 4053 } 4054 4055 if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) { 4056 // The sign bit is clear in both cases: this CANNOT overflow. 4057 return OverflowResult::NeverOverflows; 4058 } 4059 } 4060 4061 return OverflowResult::MayOverflow; 4062 } 4063 4064 /// Return true if we can prove that adding the two values of the 4065 /// knownbits will not overflow. 4066 /// Otherwise return false. 4067 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown, 4068 const KnownBits &RHSKnown) { 4069 // Addition of two 2's complement numbers having opposite signs will never 4070 // overflow. 4071 if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) || 4072 (LHSKnown.isNonNegative() && RHSKnown.isNegative())) 4073 return true; 4074 4075 // If either of the values is known to be non-negative, adding them can only 4076 // overflow if the second is also non-negative, so we can assume that. 4077 // Two non-negative numbers will only overflow if there is a carry to the 4078 // sign bit, so we can check if even when the values are as big as possible 4079 // there is no overflow to the sign bit. 
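  // Worked example for i8 (illustrative): if both operands have
  // Known.Zero = 0xC0, each value is at most 0x3F, and 0x3F + 0x3F = 0x7E
  // leaves the sign bit clear, so no carry into the sign bit is possible.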
4080   if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
4081     APInt MaxLHS = ~LHSKnown.Zero;
4082     MaxLHS.clearSignBit();
4083     APInt MaxRHS = ~RHSKnown.Zero;
4084     MaxRHS.clearSignBit();
4085     APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
4086     return Result.isSignBitClear();
4087   }
4088 
4089   // If either of the values is known to be negative, adding them can only
4090   // overflow if the second is also negative, so we can assume that.
4091   // Two negative numbers will only overflow if there is no carry to the sign
4092   // bit, so we can check if even when the values are as small as possible
4093   // there is overflow to the sign bit.
4094   if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
4095     APInt MinLHS = LHSKnown.One;
4096     MinLHS.clearSignBit();
4097     APInt MinRHS = RHSKnown.One;
4098     MinRHS.clearSignBit();
4099     APInt Result = std::move(MinLHS) + std::move(MinRHS);
4100     return Result.isSignBitSet();
4101   }
4102 
4103   // If we reached here it means that we know nothing about the sign bits.
4104   // In this case we can't know if there will be an overflow, since by
4105   // changing the sign bits any two values can be made to overflow.
4106   return false;
4107 }
4108 
4109 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4110                                                   const Value *RHS,
4111                                                   const AddOperator *Add,
4112                                                   const DataLayout &DL,
4113                                                   AssumptionCache *AC,
4114                                                   const Instruction *CxtI,
4115                                                   const DominatorTree *DT) {
4116   if (Add && Add->hasNoSignedWrap()) {
4117     return OverflowResult::NeverOverflows;
4118   }
4119 
4120   // If LHS and RHS each have at least two sign bits, the addition will look
4121   // like
4122   //
4123   // XX..... +
4124   // YY.....
4125   //
4126   // If the carry into the most significant position is 0, X and Y can't both
4127   // be 1 and therefore the carry out of the addition is also 0.
4128   //
4129   // If the carry into the most significant position is 1, X and Y can't both
4130   // be 0 and therefore the carry out of the addition is also 1.
4131   //
4132   // Since the carry into the most significant position is always equal to
4133   // the carry out of the addition, there is no signed overflow.
4134   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4135       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4136     return OverflowResult::NeverOverflows;
4137 
4138   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
4139   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
4140 
4141   if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
4142     return OverflowResult::NeverOverflows;
4143 
4144   // The remaining code needs Add to be available. Return early if it is not.
4145   if (!Add)
4146     return OverflowResult::MayOverflow;
4147 
4148   // If the sign of Add is the same as at least one of the operands, this add
4149   // CANNOT overflow. This is particularly useful when the sum is
4150   // @llvm.assume'ed non-negative rather than proved so from analyzing its
4151   // operands.
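  // Sketch: if %a is known non-negative and an @llvm.assume lets
  // computeKnownBits conclude that %s = add i32 %a, %b is non-negative as
  // well, the addition cannot have wrapped.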
4152 bool LHSOrRHSKnownNonNegative = 4153 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()); 4154 bool LHSOrRHSKnownNegative = 4155 (LHSKnown.isNegative() || RHSKnown.isNegative()); 4156 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { 4157 KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT); 4158 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) || 4159 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) { 4160 return OverflowResult::NeverOverflows; 4161 } 4162 } 4163 4164 return OverflowResult::MayOverflow; 4165 } 4166 4167 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, 4168 const Value *RHS, 4169 const DataLayout &DL, 4170 AssumptionCache *AC, 4171 const Instruction *CxtI, 4172 const DominatorTree *DT) { 4173 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT); 4174 if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) { 4175 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); 4176 4177 // If the LHS is negative and the RHS is non-negative, no unsigned wrap. 4178 if (LHSKnown.isNegative() && RHSKnown.isNonNegative()) 4179 return OverflowResult::NeverOverflows; 4180 4181 // If the LHS is non-negative and the RHS negative, we always wrap. 4182 if (LHSKnown.isNonNegative() && RHSKnown.isNegative()) 4183 return OverflowResult::AlwaysOverflows; 4184 } 4185 4186 return OverflowResult::MayOverflow; 4187 } 4188 4189 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, 4190 const Value *RHS, 4191 const DataLayout &DL, 4192 AssumptionCache *AC, 4193 const Instruction *CxtI, 4194 const DominatorTree *DT) { 4195 // If LHS and RHS each have at least two sign bits, the subtraction 4196 // cannot overflow. 4197 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && 4198 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) 4199 return OverflowResult::NeverOverflows; 4200 4201 KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT); 4202 4203 KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT); 4204 4205 // Subtraction of two 2's complement numbers having identical signs will 4206 // never overflow. 
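  // E.g. for i8 (illustrative): 100 - 50 = 50 and -100 - (-50) = -50 both
  // stay inside [-128, 127]; operands with identical signs cannot push the
  // difference out of range.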
4207   if ((LHSKnown.isNegative() && RHSKnown.isNegative()) ||
4208       (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()))
4209     return OverflowResult::NeverOverflows;
4210 
4211   // TODO: implement logic similar to checkRippleForSignedAdd
4212   return OverflowResult::MayOverflow;
4213 }
4214 
4215 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
4216                                      const DominatorTree &DT) {
4217 #ifndef NDEBUG
4218   auto IID = II->getIntrinsicID();
4219   assert((IID == Intrinsic::sadd_with_overflow ||
4220           IID == Intrinsic::uadd_with_overflow ||
4221           IID == Intrinsic::ssub_with_overflow ||
4222           IID == Intrinsic::usub_with_overflow ||
4223           IID == Intrinsic::smul_with_overflow ||
4224           IID == Intrinsic::umul_with_overflow) &&
4225          "Not an overflow intrinsic!");
4226 #endif
4227 
4228   SmallVector<const BranchInst *, 2> GuardingBranches;
4229   SmallVector<const ExtractValueInst *, 2> Results;
4230 
4231   for (const User *U : II->users()) {
4232     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4233       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4234 
4235       if (EVI->getIndices()[0] == 0)
4236         Results.push_back(EVI);
4237       else {
4238         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4239 
4240         for (const auto *U : EVI->users())
4241           if (const auto *B = dyn_cast<BranchInst>(U)) {
4242             assert(B->isConditional() && "How else is it using an i1?");
4243             GuardingBranches.push_back(B);
4244           }
4245       }
4246     } else {
4247       // We are using the aggregate directly in a way we don't want to analyze
4248       // here (storing it to a global, say).
4249       return false;
4250     }
4251   }
4252 
4253   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4254     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4255     if (!NoWrapEdge.isSingleEdge())
4256       return false;
4257 
4258     // Check if all users of the add are provably no-wrap.
4259     for (const auto *Result : Results) {
4260       // If the extractvalue itself is not executed on overflow, then we don't
4261       // need to check each use separately, since domination is transitive.
4262       if (DT.dominates(NoWrapEdge, Result->getParent()))
4263         continue;
4264 
4265       for (auto &RU : Result->uses())
4266         if (!DT.dominates(NoWrapEdge, RU))
4267           return false;
4268     }
4269 
4270     return true;
4271   };
4272 
4273   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4274 }
4275 
4276 
4277 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4278                                                  const DataLayout &DL,
4279                                                  AssumptionCache *AC,
4280                                                  const Instruction *CxtI,
4281                                                  const DominatorTree *DT) {
4282   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4283                                        Add, DL, AC, CxtI, DT);
4284 }
4285 
4286 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4287                                                  const Value *RHS,
4288                                                  const DataLayout &DL,
4289                                                  AssumptionCache *AC,
4290                                                  const Instruction *CxtI,
4291                                                  const DominatorTree *DT) {
4292   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4293 }
4294 
4295 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4296   // A memory operation returns normally if it isn't volatile. A volatile
4297   // operation is allowed to trap.
4298   //
4299   // An atomic operation isn't guaranteed to return in a reasonable amount of
4300   // time because it's possible for another thread to interfere with it for an
4301   // arbitrary length of time, but programs aren't allowed to rely on that.
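  // For example (illustrative), 'load i32, i32* %p' returns normally, while
  // 'load volatile i32, i32* %p' may target memory-mapped I/O and is allowed
  // to trap, so the volatile forms are rejected below.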
4302 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) 4303 return !LI->isVolatile(); 4304 if (const StoreInst *SI = dyn_cast<StoreInst>(I)) 4305 return !SI->isVolatile(); 4306 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I)) 4307 return !CXI->isVolatile(); 4308 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) 4309 return !RMWI->isVolatile(); 4310 if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I)) 4311 return !MII->isVolatile(); 4312 4313 // If there is no successor, then execution can't transfer to it. 4314 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I)) 4315 return !CRI->unwindsToCaller(); 4316 if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) 4317 return !CatchSwitch->unwindsToCaller(); 4318 if (isa<ResumeInst>(I)) 4319 return false; 4320 if (isa<ReturnInst>(I)) 4321 return false; 4322 if (isa<UnreachableInst>(I)) 4323 return false; 4324 4325 // Calls can throw, or contain an infinite loop, or kill the process. 4326 if (auto CS = ImmutableCallSite(I)) { 4327 // Call sites that throw have implicit non-local control flow. 4328 if (!CS.doesNotThrow()) 4329 return false; 4330 4331 // Non-throwing call sites can loop infinitely, call exit/pthread_exit 4332 // etc. and thus not return. However, LLVM already assumes that 4333 // 4334 // - Thread exiting actions are modeled as writes to memory invisible to 4335 // the program. 4336 // 4337 // - Loops that don't have side effects (side effects are volatile/atomic 4338 // stores and IO) always terminate (see http://llvm.org/PR965). 4339 // Furthermore IO itself is also modeled as writes to memory invisible to 4340 // the program. 4341 // 4342 // We rely on those assumptions here, and use the memory effects of the call 4343 // target as a proxy for checking that it always returns. 4344 4345 // FIXME: This isn't aggressive enough; a call which only writes to a global 4346 // is guaranteed to return. 4347 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() || 4348 match(I, m_Intrinsic<Intrinsic::assume>()) || 4349 match(I, m_Intrinsic<Intrinsic::sideeffect>()); 4350 } 4351 4352 // Other instructions return normally. 4353 return true; 4354 } 4355 4356 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) { 4357 // TODO: This is slightly conservative for invoke instruction since exiting 4358 // via an exception *is* normal control for them. 4359 for (auto I = BB->begin(), E = BB->end(); I != E; ++I) 4360 if (!isGuaranteedToTransferExecutionToSuccessor(&*I)) 4361 return false; 4362 return true; 4363 } 4364 4365 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I, 4366 const Loop *L) { 4367 // The loop header is guaranteed to be executed for every iteration. 4368 // 4369 // FIXME: Relax this constraint to cover all basic blocks that are 4370 // guaranteed to be executed at every iteration. 
4371 if (I->getParent() != L->getHeader()) return false; 4372 4373 for (const Instruction &LI : *L->getHeader()) { 4374 if (&LI == I) return true; 4375 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false; 4376 } 4377 llvm_unreachable("Instruction not contained in its own parent basic block."); 4378 } 4379 4380 bool llvm::propagatesFullPoison(const Instruction *I) { 4381 switch (I->getOpcode()) { 4382 case Instruction::Add: 4383 case Instruction::Sub: 4384 case Instruction::Xor: 4385 case Instruction::Trunc: 4386 case Instruction::BitCast: 4387 case Instruction::AddrSpaceCast: 4388 case Instruction::Mul: 4389 case Instruction::Shl: 4390 case Instruction::GetElementPtr: 4391 // These operations all propagate poison unconditionally. Note that poison 4392 // is not any particular value, so xor or subtraction of poison with 4393 // itself still yields poison, not zero. 4394 return true; 4395 4396 case Instruction::AShr: 4397 case Instruction::SExt: 4398 // For these operations, one bit of the input is replicated across 4399 // multiple output bits. A replicated poison bit is still poison. 4400 return true; 4401 4402 case Instruction::ICmp: 4403 // Comparing poison with any value yields poison. This is why, for 4404 // instance, x s< (x +nsw 1) can be folded to true. 4405 return true; 4406 4407 default: 4408 return false; 4409 } 4410 } 4411 4412 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) { 4413 switch (I->getOpcode()) { 4414 case Instruction::Store: 4415 return cast<StoreInst>(I)->getPointerOperand(); 4416 4417 case Instruction::Load: 4418 return cast<LoadInst>(I)->getPointerOperand(); 4419 4420 case Instruction::AtomicCmpXchg: 4421 return cast<AtomicCmpXchgInst>(I)->getPointerOperand(); 4422 4423 case Instruction::AtomicRMW: 4424 return cast<AtomicRMWInst>(I)->getPointerOperand(); 4425 4426 case Instruction::UDiv: 4427 case Instruction::SDiv: 4428 case Instruction::URem: 4429 case Instruction::SRem: 4430 return I->getOperand(1); 4431 4432 default: 4433 return nullptr; 4434 } 4435 } 4436 4437 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) { 4438 // We currently only look for uses of poison values within the same basic 4439 // block, as that makes it easier to guarantee that the uses will be 4440 // executed given that PoisonI is executed. 4441 // 4442 // FIXME: Expand this to consider uses beyond the same basic block. To do 4443 // this, look out for the distinction between post-dominance and strong 4444 // post-dominance. 4445 const BasicBlock *BB = PoisonI->getParent(); 4446 4447 // Set of instructions that we have proved will yield poison if PoisonI 4448 // does. 4449 SmallSet<const Value *, 16> YieldsPoison; 4450 SmallSet<const BasicBlock *, 4> Visited; 4451 YieldsPoison.insert(PoisonI); 4452 Visited.insert(PoisonI->getParent()); 4453 4454 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end(); 4455 4456 unsigned Iter = 0; 4457 while (Iter++ < MaxDepth) { 4458 for (auto &I : make_range(Begin, End)) { 4459 if (&I != PoisonI) { 4460 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I); 4461 if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) 4462 return true; 4463 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 4464 return false; 4465 } 4466 4467 // Mark poison that propagates from I through uses of I. 
4468       if (YieldsPoison.count(&I)) {
4469         for (const User *User : I.users()) {
4470           const Instruction *UserI = cast<Instruction>(User);
4471           if (propagatesFullPoison(UserI))
4472             YieldsPoison.insert(User);
4473         }
4474       }
4475     }
4476 
4477     if (auto *NextBB = BB->getSingleSuccessor()) {
4478       if (Visited.insert(NextBB).second) {
4479         BB = NextBB;
4480         Begin = BB->getFirstNonPHI()->getIterator();
4481         End = BB->end();
4482         continue;
4483       }
4484     }
4485 
4486     break;
4487   }
4488   return false;
4489 }
4490 
4491 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4492   if (FMF.noNaNs())
4493     return true;
4494 
4495   if (auto *C = dyn_cast<ConstantFP>(V))
4496     return !C->isNaN();
4497 
4498   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4499     if (!C->getElementType()->isFloatingPointTy())
4500       return false;
4501     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4502       if (C->getElementAsAPFloat(I).isNaN())
4503         return false;
4504     }
4505     return true;
4506   }
4507 
4508   return false;
4509 }
4510 
4511 static bool isKnownNonZero(const Value *V) {
4512   if (auto *C = dyn_cast<ConstantFP>(V))
4513     return !C->isZero();
4514 
4515   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4516     if (!C->getElementType()->isFloatingPointTy())
4517       return false;
4518     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4519       if (C->getElementAsAPFloat(I).isZero())
4520         return false;
4521     }
4522     return true;
4523   }
4524 
4525   return false;
4526 }
4527 
4528 /// Match a clamp pattern for float types, ignoring NaNs and signed zeros.
4529 /// Given a non-min/max outer cmp/select from the clamp pattern, this
4530 /// function recognizes if it can be substituted by a "canonical" min/max
4531 /// pattern.
4532 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4533                                                Value *CmpLHS, Value *CmpRHS,
4534                                                Value *TrueVal, Value *FalseVal,
4535                                                Value *&LHS, Value *&RHS) {
4536   // Try to match
4537   // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4538   // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4539   // and return a description of the outer Max/Min.
4540 
4541   // First, check if the select has inverse order:
4542   if (CmpRHS == FalseVal) {
4543     std::swap(TrueVal, FalseVal);
4544     Pred = CmpInst::getInversePredicate(Pred);
4545   }
4546 
4547   // Assume success now. If there's no match, callers should not use these anyway.
4548   LHS = TrueVal;
4549   RHS = FalseVal;
4550 
4551   const APFloat *FC1;
4552   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4553     return {SPF_UNKNOWN, SPNB_NA, false};
4554 
4555   const APFloat *FC2;
4556   switch (Pred) {
4557   case CmpInst::FCMP_OLT:
4558   case CmpInst::FCMP_OLE:
4559   case CmpInst::FCMP_ULT:
4560   case CmpInst::FCMP_ULE:
4561     if (match(FalseVal,
4562               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4563                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4564         FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4565       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4566     break;
4567   case CmpInst::FCMP_OGT:
4568   case CmpInst::FCMP_OGE:
4569   case CmpInst::FCMP_UGT:
4570   case CmpInst::FCMP_UGE:
4571     if (match(FalseVal,
4572               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4573                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4574         FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4575       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4576     break;
4577   default:
4578     break;
4579   }
4580 
4581   return {SPF_UNKNOWN, SPNB_NA, false};
4582 }
4583 
4584 /// Recognize variations of:
4585 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ?
(h) : (v))) 4586 static SelectPatternResult matchClamp(CmpInst::Predicate Pred, 4587 Value *CmpLHS, Value *CmpRHS, 4588 Value *TrueVal, Value *FalseVal) { 4589 // Swap the select operands and predicate to match the patterns below. 4590 if (CmpRHS != TrueVal) { 4591 Pred = ICmpInst::getSwappedPredicate(Pred); 4592 std::swap(TrueVal, FalseVal); 4593 } 4594 const APInt *C1; 4595 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 4596 const APInt *C2; 4597 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 4598 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 4599 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 4600 return {SPF_SMAX, SPNB_NA, false}; 4601 4602 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 4603 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 4604 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 4605 return {SPF_SMIN, SPNB_NA, false}; 4606 4607 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 4608 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 4609 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 4610 return {SPF_UMAX, SPNB_NA, false}; 4611 4612 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 4613 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 4614 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 4615 return {SPF_UMIN, SPNB_NA, false}; 4616 } 4617 return {SPF_UNKNOWN, SPNB_NA, false}; 4618 } 4619 4620 /// Recognize variations of: 4621 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) 4622 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, 4623 Value *CmpLHS, Value *CmpRHS, 4624 Value *TVal, Value *FVal, 4625 unsigned Depth) { 4626 // TODO: Allow FP min/max with nnan/nsz. 4627 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); 4628 4629 Value *A, *B; 4630 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); 4631 if (!SelectPatternResult::isMinOrMax(L.Flavor)) 4632 return {SPF_UNKNOWN, SPNB_NA, false}; 4633 4634 Value *C, *D; 4635 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); 4636 if (L.Flavor != R.Flavor) 4637 return {SPF_UNKNOWN, SPNB_NA, false}; 4638 4639 // We have something like: x Pred y ? min(a, b) : min(c, d). 4640 // Try to match the compare to the min/max operations of the select operands. 4641 // First, make sure we have the right compare predicate. 
4642 switch (L.Flavor) { 4643 case SPF_SMIN: 4644 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { 4645 Pred = ICmpInst::getSwappedPredicate(Pred); 4646 std::swap(CmpLHS, CmpRHS); 4647 } 4648 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 4649 break; 4650 return {SPF_UNKNOWN, SPNB_NA, false}; 4651 case SPF_SMAX: 4652 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { 4653 Pred = ICmpInst::getSwappedPredicate(Pred); 4654 std::swap(CmpLHS, CmpRHS); 4655 } 4656 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 4657 break; 4658 return {SPF_UNKNOWN, SPNB_NA, false}; 4659 case SPF_UMIN: 4660 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { 4661 Pred = ICmpInst::getSwappedPredicate(Pred); 4662 std::swap(CmpLHS, CmpRHS); 4663 } 4664 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 4665 break; 4666 return {SPF_UNKNOWN, SPNB_NA, false}; 4667 case SPF_UMAX: 4668 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { 4669 Pred = ICmpInst::getSwappedPredicate(Pred); 4670 std::swap(CmpLHS, CmpRHS); 4671 } 4672 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) 4673 break; 4674 return {SPF_UNKNOWN, SPNB_NA, false}; 4675 default: 4676 return {SPF_UNKNOWN, SPNB_NA, false}; 4677 } 4678 4679 // If there is a common operand in the already matched min/max and the other 4680 // min/max operands match the compare operands (either directly or inverted), 4681 // then this is min/max of the same flavor. 4682 4683 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4684 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4685 if (D == B) { 4686 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4687 match(A, m_Not(m_Specific(CmpRHS))))) 4688 return {L.Flavor, SPNB_NA, false}; 4689 } 4690 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4691 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4692 if (C == B) { 4693 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 4694 match(A, m_Not(m_Specific(CmpRHS))))) 4695 return {L.Flavor, SPNB_NA, false}; 4696 } 4697 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4698 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4699 if (D == A) { 4700 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4701 match(B, m_Not(m_Specific(CmpRHS))))) 4702 return {L.Flavor, SPNB_NA, false}; 4703 } 4704 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 4705 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 4706 if (C == A) { 4707 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 4708 match(B, m_Not(m_Specific(CmpRHS))))) 4709 return {L.Flavor, SPNB_NA, false}; 4710 } 4711 4712 return {SPF_UNKNOWN, SPNB_NA, false}; 4713 } 4714 4715 /// Match non-obvious integer minimum and maximum sequences. 4716 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, 4717 Value *CmpLHS, Value *CmpRHS, 4718 Value *TrueVal, Value *FalseVal, 4719 Value *&LHS, Value *&RHS, 4720 unsigned Depth) { 4721 // Assume success. If there's no match, callers should not use these anyway. 

/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS,
                                       unsigned Depth) {
  // Assume success. If there's no match, callers should not use these anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
    return SPR;

  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  const APInt *C1;
  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
        C2->isMaxSignedValue())
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
        C2->isMinSignedValue())
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  // Look through 'not' ops to find disguised signed min/max.
  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
      match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
      match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  return {SPF_UNKNOWN, SPNB_NA, false};
}
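
// A sketch of the first "disguised" smin case above (illustrative constants
// only; m_Not matches the xor-with-all-ones form):
//   %nx = xor i32 %x, -1                    ; ~X
//   %c  = icmp sgt i32 %x, 42               ; X >s 42
//   %r  = select i1 %c, i32 %nx, i32 -43    ; ~42 == -43
// With C1 == 42 and C2 == -43 == ~C1, %r is recognized as SMIN(~X, -43).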

bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
  assert(X && Y && "Invalid operand");

  // X = sub (0, Y) || X = sub nsw (0, Y)
  if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
      (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
    return true;

  // Y = sub (0, X) || Y = sub nsw (0, X)
  if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
      (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
    return true;

  // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
  Value *A, *B;
  return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
                       match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
         (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
                      match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
}
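
// Minimal usage sketch (X and Y are hypothetical values already in scope):
//   if (isKnownNegation(X, Y))
//     ...; // X == -Y, e.g. X = sub(0, Y), or X = sub(A, B) with Y = sub(B, A).
//   if (isKnownNegation(X, Y, /*NeedNSW=*/true))
//     ...; // As above, and the subtractions are also known not to wrap.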

static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS,
                                              unsigned Depth) {
  if (CmpInst::isFPPredicate(Pred)) {
    // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
    // 0.0 operand, set the compare's 0.0 operands to that same value for the
    // purpose of identifying min/max. Disregard vector constants with undefined
    // elements because those cannot be back-propagated for analysis.
    Value *OutputZeroVal = nullptr;
    if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
        !cast<Constant>(TrueVal)->containsUndefElement())
      OutputZeroVal = TrueVal;
    else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
             !cast<Constant>(FalseVal)->containsUndefElement())
      OutputZeroVal = FalseVal;

    if (OutputZeroVal) {
      if (match(CmpLHS, m_AnyZeroFP()))
        CmpLHS = OutputZeroVal;
      if (match(CmpRHS, m_AnyZeroFP()))
        CmpRHS = OutputZeroVal;
    }
  }

  LHS = CmpLHS;
  RHS = CmpRHS;

  // Signed zero may return inconsistent results between implementations.
  //   (0.0 <= -0.0) ? 0.0 : -0.0  // Returns 0.0
  //   minNum(0.0, -0.0)           // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore, we behave conservatively and only proceed if at least one of the
  // operands is known not to be zero or if we don't care about signed zero.
  switch (Pred) {
  default: break;
  // FIXME: Include OGT/OLT/UGT/ULT.
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //     ordered comparison fails), which could be NaN or non-NaN.
  // So here we discover exactly what NaN behavior is required/accepted.
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  if (isKnownNegation(TrueVal, FalseVal)) {
    // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
    // match against either LHS or sext(LHS).
    auto MaybeSExtCmpLHS =
        m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
    auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
    auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
    if (match(TrueVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value (-X >s 0),
      // swap the return values because the negated value is always 'RHS'.
      LHS = TrueVal;
      RHS = FalseVal;
      if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
      // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_ABS, SPNB_NA, false};

      // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
      // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_NABS, SPNB_NA, false};
    }
    else if (match(FalseVal, MaybeSExtCmpLHS)) {
      // Set the return values. If the compare uses the negated value (-X >s 0),
      // swap the return values because the negated value is always 'RHS'.
      LHS = FalseVal;
      RHS = TrueVal;
      if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
        std::swap(LHS, RHS);

      // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
      // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
      if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
        return {SPF_NABS, SPNB_NA, false};

      // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
      // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
      if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
        return {SPF_ABS, SPNB_NA, false};
    }
  }

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);

  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such cases.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}
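
// Example of the ABS recognition above (made-up IR for illustration):
//   %neg = sub i32 0, %x
//   %cmp = icmp sgt i32 %x, -1
//   %abs = select i1 %cmp, i32 %x, i32 %neg   ; (X >s -1) ? X : -X --> ABS(X)
// isKnownNegation(%x, %neg) holds, TrueVal matches CmpLHS, and CmpRHS is in
// {0, -1}, so the result is {SPF_ABS, SPNB_NA, false} with LHS == %x.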

/// Helps to match a select pattern in case of a type mismatch.
///
/// The function handles the case where the types of the true and false values
/// of a select instruction differ from the types of the cmp instruction's
/// operands because of a cast instruction. The function checks if it is legal
/// to move the cast operation after the "select". If yes, it returns the new
/// second value of the "select" (under the assumption that the cast is moved):
/// 1. As the operand of the cast instruction, when both values of the "select"
///    are the same cast instruction.
/// 2. As the restored constant (by applying the reverse cast operation), when
///    the first value of the "select" is a cast operation and the second value
///    is a constant.
/// NOTE: We return only the new second value because the first value can be
/// accessed as the operand of the cast instruction.
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move the trunc after the select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C can be extended in any way because we don't care about
      // the upper bits after truncation. This can't be an abs pattern, because
      // that would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only a min/max pattern can be matched here. Such a match requires
      // the widened C to equal CmpConst; that is why we set the widened C to
      // CmpConst, and the condition trunc(CmpConst) == C is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}
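
// For instance (hypothetical names and constants), given:
//   %cmp = icmp ult i8 %x, 100
//   %w   = zext i8 %x to i32
//   %sel = select i1 %cmp, i32 %w, i32 100
// the ZExt case truncates 100 to i8; since zext(trunc(100)) == 100, no
// information is lost, and the caller can instead match the narrow pattern
// (%x <u 100) ? %x : 100, i.e. an unsigned minimum (SPF_UMIN).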

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  if (Depth >= MaxDepth)
    return {SPF_UNKNOWN, SPNB_NA, false};

  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
        FMF.setNoSignedZeros();
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS, Depth);
}
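
// Sketch of a typical client query (V is a hypothetical value; the flavor
// check uses the isMinOrMax helper declared in ValueTracking.h):
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(V, LHS, RHS);
//   if (SelectPatternResult::isMinOrMax(SPR.Flavor))
//     ...; // V computes the min/max of LHS and RHS given by SPR.Flavor.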

CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
  if (SPF == SPF_FMINNUM)
    return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
  if (SPF == SPF_FMAXNUM)
    return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
  llvm_unreachable("unhandled!");
}

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
  if (SPF == SPF_SMIN) return SPF_SMAX;
  if (SPF == SPF_UMIN) return SPF_UMAX;
  if (SPF == SPF_SMAX) return SPF_SMIN;
  if (SPF == SPF_UMAX) return SPF_UMIN;
  llvm_unreachable("unhandled!");
}

CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
}

/// Return true if "icmp Pred LHS RHS" is always true.
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is true
/// when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
/// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
/// Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    CmpInst::Predicate BPred,
                                                    bool AreSwappedOps) {
  // Canonicalize the predicate as if the operands were not commuted.
  if (AreSwappedOps)
    BPred = ICmpInst::getSwappedPredicate(BPred);

  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}
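
// For example, if "icmp ugt %x, %y" (APred) is known true, then with matching
// operands "icmp uge %x, %y" is implied true and "icmp ule %x, %y" is implied
// false; with AreSwappedOps, the same facts follow from "icmp ult %y, %x".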

/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
/// Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const ConstantInt *C2) {
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);
  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
  ICmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  Value *BLHS = RHS->getOperand(0);
  Value *BRHS = RHS->getOperand(1);
  ICmpInst::Predicate BPred = RHS->getPredicate();

  // Can we infer anything when the two compares have matching operands?
  bool AreSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, BPred, AreSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication =
            isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication =
            isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}
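
// Illustrative example (made-up IR): if %t is known true where
//   %c1 = icmp slt i32 %x, 4
//   %c2 = icmp slt i32 %y, %z
//   %t  = and i1 %c1, %c2
// then both legs are true, and the first leg proves e.g. "icmp slt i32 %x, 10"
// via isImpliedCondICmps: the exact region [INT_MIN, 4) of %c1 is contained in
// the allowed region [INT_MIN, 10), so the difference is empty.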

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (LHSCmp && RHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
  // an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO && RHSCmp) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
  }
  return None;
}
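
// Sketch of a typical query (A and B are hypothetical i1 values in scope):
//   if (Optional<bool> Imp = isImpliedCondition(A, B, DL, /*LHSIsTrue=*/true))
//     ...; // A being true forces B to the value *Imp (true or false).
// A None result means nothing could be inferred within the MaxDepth limit.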

Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
                                             const Instruction *ContextI,
                                             const DataLayout &DL) {
  assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
  if (!ContextI || !ContextI->getParent())
    return None;

  // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (eg, from a SimplifyQuery) instead?
  const BasicBlock *ContextBB = ContextI->getParent();
  const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
  if (!PredBB)
    return None;

  // We need a conditional branch in the predecessor.
  Value *PredCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
    return None;

  // A branch with identical successors should get simplified away; don't
  // bother analyzing this degenerate condition.
  if (TrueBB == FalseBB)
    return None;

  assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
         "Predecessor block does not point to successor?");

  // Is this condition implied by the predecessor condition?
  bool CondIsTrue = TrueBB == ContextBB;
  return isImpliedCondition(PredCond, Cond, DL, CondIsTrue);
}