//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, KnownBits &KnownOut,
                                   KnownBits &Known2, unsigned Depth,
                                   const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
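      // (With nsw this holds even when the square would overflow: any
      // negative result of, e.g., %s = mul nsw i8 %x, %x would imply signed
      // overflow, i.e. poison, so the sign bit may be assumed clear.)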
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                                Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  // -------
  //    XX11
  //   XX11
  //  XX11
  // XX11
  // -------
  // XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  // =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
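  // Plugging the i8 example above into the code below: for a = XXXX1100 we
  // have TrailBitsKnown0 = 4 and TrailZero0 = 2, for b = XXXX1110 we have
  // TrailBitsKnown1 = 4 and TrailZero1 = 1, so SmallestOperand =
  // min(4 - 2, 4 - 1) = 2 and ResultBitsKnown = min(2 + 3, 8) = 5, matching
  // the five inferred bits.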
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behavior we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
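      // For example, for i8 and C == 4, assume((v >> 4) == 1) makes RHSKnown
      // all-known as 00000001; shifting left by 4 pins bits 7..4 of v to
      // 0001 while the shifted-out low four bits stay unknown.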
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2, unsigned Depth,
    const Query &Q, function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
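  // For example, with BitWidth == 32 the well-defined shift amounts live in
  // the low five bits (PowerOf2Ceil(32) - 1 == 31). If nothing is known about
  // those bits of the shift operand, the loop below could at best rule out a
  // shift by zero, so check that one fact here and give up otherwise.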
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
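    // (An xor bit is known exactly when both input bits are known: equal
    // known bits yield a known 0, differing known bits yield a known 1.)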
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    } else if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        MaxHighZeros = 1;
    }

    // Only known if known in both the LHS and RHS.
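    // (The select may produce either arm, so a bit stays known only if both
    // arms agree on it; the min/max facts above then add back any high bits
    // implied by the flavor.)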
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
        Q.DL.getIndexTypeSizeInBits(ScalarTy) :
        Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth, false);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth, true /* ExtendedBitsAreKnownZero */);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
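      // (lshr shifts zeros in from the top; e.g. lshr i8 %x, 3 always clears
      // bits 7..5 regardless of %x.)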
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
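    // For example, if the LHS is known to be u< 16 (four leading zeros in
    // i8), the urem result is u<= the LHS and thus also u< 16.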
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
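          // For example, for i32 %i = phi [0, %entry], [%i.next, %loop] with
          // %i.next = add i32 %i, 4, both the start value 0 and the step 4
          // have at least two trailing zeros, so %i is always a multiple
          // of 4.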
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value refers back to this PHI.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
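    // For example, !range !{i64 0, i64 8} on an i64 call constrains the
    // result to [0, 8), pinning the top 61 bits to zero.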
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::fshr:
      case Intrinsic::fshl: {
        const APInt *SA;
        if (!match(I->getOperand(2), m_APInt(SA)))
          break;

        // Normalize to funnel shift left.
        uint64_t ShiftAmt = SA->urem(BitWidth);
        if (II->getIntrinsicID() == Intrinsic::fshr)
          ShiftAmt = BitWidth - ShiftAmt;

        KnownBits Known3(Known);
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);

        Known.Zero =
            Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
        Known.One =
            Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
        break;
      }
      case Intrinsic::uadd_sat:
      case Intrinsic::usub_sat: {
        bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

        // Add: Leading ones of either operand are preserved.
        // Sub: Leading zeros of LHS and leading ones of RHS are preserved
        // as leading zeros in the result.
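        // For example, for i8 uadd.sat: if either operand is known to be
        // u>= 0xF0 (four leading ones), the saturating result is too, so
        // those leading ones survive into the result.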
1533       unsigned LeadingKnown;
1534       if (IsAdd)
1535         LeadingKnown = std::max(Known.countMinLeadingOnes(),
1536                                 Known2.countMinLeadingOnes());
1537       else
1538         LeadingKnown = std::max(Known.countMinLeadingZeros(),
1539                                 Known2.countMinLeadingOnes());
1540
1541       Known = KnownBits::computeForAddSub(
1542           IsAdd, /* NSW */ false, Known, Known2);
1543
1544       // We select between the operation result and all-ones/zero
1545       // respectively, so we can preserve known ones/zeros.
1546       if (IsAdd) {
1547         Known.One.setHighBits(LeadingKnown);
1548         Known.Zero.clearAllBits();
1549       } else {
1550         Known.Zero.setHighBits(LeadingKnown);
1551         Known.One.clearAllBits();
1552       }
1553       break;
1554     }
1555     case Intrinsic::x86_sse42_crc32_64_64:
1556       Known.Zero.setBitsFrom(32);
1557       break;
1558     }
1559   }
1560   break;
1561 case Instruction::ExtractElement:
1562   // Look through extract element. At the moment we keep this simple and skip
1563   // tracking the specific element. But at least we might find information
1564   // valid for all elements of the vector (for example if the vector is
1565   // sign-extended, shifted, etc.).
1566   computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1567   break;
1568 case Instruction::ExtractValue:
1569   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1570     const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1571     if (EVI->getNumIndices() != 1) break;
1572     if (EVI->getIndices()[0] == 0) {
1573       switch (II->getIntrinsicID()) {
1574       default: break;
1575       case Intrinsic::uadd_with_overflow:
1576       case Intrinsic::sadd_with_overflow:
1577         computeKnownBitsAddSub(true, II->getArgOperand(0),
1578                                II->getArgOperand(1), false, Known, Known2,
1579                                Depth, Q);
1580         break;
1581       case Intrinsic::usub_with_overflow:
1582       case Intrinsic::ssub_with_overflow:
1583         computeKnownBitsAddSub(false, II->getArgOperand(0),
1584                                II->getArgOperand(1), false, Known, Known2,
1585                                Depth, Q);
1586         break;
1587       case Intrinsic::umul_with_overflow:
1588       case Intrinsic::smul_with_overflow:
1589         computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1590                             Known, Known2, Depth, Q);
1591         break;
1592       }
1593     }
1594   }
1595 }
1596 }
1597
1598 /// Determine which bits of V are known to be either zero or one and return
1599 /// them.
1600 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1601   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1602   computeKnownBits(V, Known, Depth, Q);
1603   return Known;
1604 }
1605
1606 /// Determine which bits of V are known to be either zero or one and return
1607 /// them in the Known bit set.
1608 ///
1609 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1610 /// we cannot optimize based on the assumption that it is zero without changing
1611 /// it to be an explicit zero. If we don't change it to zero, other code could
1612 /// be optimized based on the contradictory assumption that it is non-zero.
1613 /// Because instcombine aggressively folds operations with undef args anyway,
1614 /// this won't lose us code quality.
1615 ///
1616 /// This function is defined on values with integer type, values with pointer
1617 /// type, and vectors of integers. In the case
1618 /// where V is a vector, the known zero and known one values are the
1619 /// same width as the vector element, and the bit is set only if it is true
1620 /// for all of the elements in the vector.
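/// For example, for <2 x i8> <i8 3, i8 5> (0b00000011 and 0b00000101), only
/// bit 0 is known one and only bits 3-7 are known zero, since those are the
/// only bit positions on which both elements agree.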
1621 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1622                       const Query &Q) {
1623   assert(V && "No Value?");
1624   assert(Depth <= MaxDepth && "Limit Search Depth");
1625   unsigned BitWidth = Known.getBitWidth();
1626
1627   assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1628           V->getType()->isPtrOrPtrVectorTy()) &&
1629          "Not integer or pointer type!");
1630
1631   Type *ScalarTy = V->getType()->getScalarType();
1632   unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1633     Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1634   assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1635   (void)BitWidth;
1636   (void)ExpectedWidth;
1637
1638   const APInt *C;
1639   if (match(V, m_APInt(C))) {
1640     // We know all of the bits for a scalar constant or a splat vector constant!
1641     Known.One = *C;
1642     Known.Zero = ~Known.One;
1643     return;
1644   }
1645   // Null and aggregate-zero are all-zeros.
1646   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1647     Known.setAllZero();
1648     return;
1649   }
1650   // Handle a constant vector by taking the intersection of the known bits of
1651   // each element.
1652   if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1653     // We know that CDS must be a vector of integers. Take the intersection of
1654     // each element.
1655     Known.Zero.setAllBits(); Known.One.setAllBits();
1656     for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1657       APInt Elt = CDS->getElementAsAPInt(i);
1658       Known.Zero &= ~Elt;
1659       Known.One &= Elt;
1660     }
1661     return;
1662   }
1663
1664   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1665     // We know that CV must be a vector of integers. Take the intersection of
1666     // each element.
1667     Known.Zero.setAllBits(); Known.One.setAllBits();
1668     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1669       Constant *Element = CV->getAggregateElement(i);
1670       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1671       if (!ElementCI) {
1672         Known.resetAll();
1673         return;
1674       }
1675       const APInt &Elt = ElementCI->getValue();
1676       Known.Zero &= ~Elt;
1677       Known.One &= Elt;
1678     }
1679     return;
1680   }
1681
1682   // Start out not knowing anything.
1683   Known.resetAll();
1684
1685   // We can't imply anything about undefs.
1686   if (isa<UndefValue>(V))
1687     return;
1688
1689   // There's no point in looking through other users of ConstantData for
1690   // assumptions. Confirm that we've handled them all.
1691   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1692
1693   // Limit search depth.
1694   // All recursive calls that increase depth must come after this.
1695   if (Depth == MaxDepth)
1696     return;
1697
1698   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1699   // the bits of its aliasee.
1700   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1701     if (!GA->isInterposable())
1702       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1703     return;
1704   }
1705
1706   if (const Operator *I = dyn_cast<Operator>(V))
1707     computeKnownBitsFromOperator(I, Known, Depth, Q);
1708
1709   // Aligned pointers have trailing zeros; refine the Known.Zero set.
1710   if (V->getType()->isPointerTy()) {
1711     unsigned Align = V->getPointerAlignment(Q.DL);
1712     if (Align)
1713       Known.Zero.setLowBits(countTrailingZeros(Align));
1714   }
1715
1716   // computeKnownBitsFromAssume strictly refines Known.
1717   // Therefore, we run it after computeKnownBitsFromOperator.
1718 1719 // Check whether a nearby assume intrinsic can determine some known bits. 1720 computeKnownBitsFromAssume(V, Known, Depth, Q); 1721 1722 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 1723 } 1724 1725 /// Return true if the given value is known to have exactly one 1726 /// bit set when defined. For vectors return true if every element is known to 1727 /// be a power of two when defined. Supports values with integer or pointer 1728 /// types and vectors of integers. 1729 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, 1730 const Query &Q) { 1731 assert(Depth <= MaxDepth && "Limit Search Depth"); 1732 1733 // Attempt to match against constants. 1734 if (OrZero && match(V, m_Power2OrZero())) 1735 return true; 1736 if (match(V, m_Power2())) 1737 return true; 1738 1739 // 1 << X is clearly a power of two if the one is not shifted off the end. If 1740 // it is shifted off the end then the result is undefined. 1741 if (match(V, m_Shl(m_One(), m_Value()))) 1742 return true; 1743 1744 // (signmask) >>l X is clearly a power of two if the one is not shifted off 1745 // the bottom. If it is shifted off the bottom then the result is undefined. 1746 if (match(V, m_LShr(m_SignMask(), m_Value()))) 1747 return true; 1748 1749 // The remaining tests are all recursive, so bail out if we hit the limit. 1750 if (Depth++ == MaxDepth) 1751 return false; 1752 1753 Value *X = nullptr, *Y = nullptr; 1754 // A shift left or a logical shift right of a power of two is a power of two 1755 // or zero. 1756 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 1757 match(V, m_LShr(m_Value(X), m_Value())))) 1758 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 1759 1760 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 1761 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 1762 1763 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 1764 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 1765 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 1766 1767 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 1768 // A power of two and'd with anything is a power of two or zero. 1769 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 1770 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 1771 return true; 1772 // X & (-X) is always a power of two or zero. 1773 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 1774 return true; 1775 return false; 1776 } 1777 1778 // Adding a power-of-two or zero to the same power-of-two or zero yields 1779 // either the original power-of-two, a larger power-of-two or zero. 
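  // For example, if Y is known to be 8 or 0, then (X & Y) + Y is one of
  // 0 + 0 == 0, 0 + 8 == 8, or 8 + 8 == 16 (given that the add does not
  // wrap), each of which is a power of two or zero.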
1780 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 1781 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 1782 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) || 1783 Q.IIQ.hasNoSignedWrap(VOBO)) { 1784 if (match(X, m_And(m_Specific(Y), m_Value())) || 1785 match(X, m_And(m_Value(), m_Specific(Y)))) 1786 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 1787 return true; 1788 if (match(Y, m_And(m_Specific(X), m_Value())) || 1789 match(Y, m_And(m_Value(), m_Specific(X)))) 1790 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 1791 return true; 1792 1793 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 1794 KnownBits LHSBits(BitWidth); 1795 computeKnownBits(X, LHSBits, Depth, Q); 1796 1797 KnownBits RHSBits(BitWidth); 1798 computeKnownBits(Y, RHSBits, Depth, Q); 1799 // If i8 V is a power of two or zero: 1800 // ZeroBits: 1 1 1 0 1 1 1 1 1801 // ~ZeroBits: 0 0 0 1 0 0 0 0 1802 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) 1803 // If OrZero isn't set, we cannot give back a zero result. 1804 // Make sure either the LHS or RHS has a bit set. 1805 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) 1806 return true; 1807 } 1808 } 1809 1810 // An exact divide or right shift can only shift off zero bits, so the result 1811 // is a power of two only if the first operand is a power of two and not 1812 // copying a sign bit (sdiv int_min, 2). 1813 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 1814 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 1815 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 1816 Depth, Q); 1817 } 1818 1819 return false; 1820 } 1821 1822 /// Test whether a GEP's result is known to be non-null. 1823 /// 1824 /// Uses properties inherent in a GEP to try to determine whether it is known 1825 /// to be non-null. 1826 /// 1827 /// Currently this routine does not support vector GEPs. 1828 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, 1829 const Query &Q) { 1830 const Function *F = nullptr; 1831 if (const Instruction *I = dyn_cast<Instruction>(GEP)) 1832 F = I->getFunction(); 1833 1834 if (!GEP->isInBounds() || 1835 NullPointerIsDefined(F, GEP->getPointerAddressSpace())) 1836 return false; 1837 1838 // FIXME: Support vector-GEPs. 1839 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 1840 1841 // If the base pointer is non-null, we cannot walk to a null address with an 1842 // inbounds GEP in address space zero. 1843 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) 1844 return true; 1845 1846 // Walk the GEP operands and see if any operand introduces a non-zero offset. 1847 // If so, then the GEP cannot produce a null pointer, as doing so would 1848 // inherently violate the inbounds contract within address space zero. 1849 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 1850 GTI != GTE; ++GTI) { 1851 // Struct types are easy -- they must always be indexed by a constant. 1852 if (StructType *STy = GTI.getStructTypeOrNull()) { 1853 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); 1854 unsigned ElementIdx = OpC->getZExtValue(); 1855 const StructLayout *SL = Q.DL.getStructLayout(STy); 1856 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); 1857 if (ElementOffset > 0) 1858 return true; 1859 continue; 1860 } 1861 1862 // If we have a zero-sized type, the index doesn't matter. Keep looping. 
1863 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0) 1864 continue; 1865 1866 // Fast path the constant operand case both for efficiency and so we don't 1867 // increment Depth when just zipping down an all-constant GEP. 1868 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { 1869 if (!OpC->isZero()) 1870 return true; 1871 continue; 1872 } 1873 1874 // We post-increment Depth here because while isKnownNonZero increments it 1875 // as well, when we pop back up that increment won't persist. We don't want 1876 // to recurse 10k times just because we have 10k GEP operands. We don't 1877 // bail completely out because we want to handle constant GEPs regardless 1878 // of depth. 1879 if (Depth++ >= MaxDepth) 1880 continue; 1881 1882 if (isKnownNonZero(GTI.getOperand(), Depth, Q)) 1883 return true; 1884 } 1885 1886 return false; 1887 } 1888 1889 static bool isKnownNonNullFromDominatingCondition(const Value *V, 1890 const Instruction *CtxI, 1891 const DominatorTree *DT) { 1892 assert(V->getType()->isPointerTy() && "V must be pointer type"); 1893 assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull"); 1894 1895 if (!CtxI || !DT) 1896 return false; 1897 1898 unsigned NumUsesExplored = 0; 1899 for (auto *U : V->users()) { 1900 // Avoid massive lists 1901 if (NumUsesExplored >= DomConditionsMaxUses) 1902 break; 1903 NumUsesExplored++; 1904 1905 // If the value is used as an argument to a call or invoke, then argument 1906 // attributes may provide an answer about null-ness. 1907 if (auto CS = ImmutableCallSite(U)) 1908 if (auto *CalledFunc = CS.getCalledFunction()) 1909 for (const Argument &Arg : CalledFunc->args()) 1910 if (CS.getArgOperand(Arg.getArgNo()) == V && 1911 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI)) 1912 return true; 1913 1914 // Consider only compare instructions uniquely controlling a branch 1915 CmpInst::Predicate Pred; 1916 if (!match(const_cast<User *>(U), 1917 m_c_ICmp(Pred, m_Specific(V), m_Zero())) || 1918 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)) 1919 continue; 1920 1921 SmallVector<const User *, 4> WorkList; 1922 SmallPtrSet<const User *, 4> Visited; 1923 for (auto *CmpU : U->users()) { 1924 assert(WorkList.empty() && "Should be!"); 1925 if (Visited.insert(CmpU).second) 1926 WorkList.push_back(CmpU); 1927 1928 while (!WorkList.empty()) { 1929 auto *Curr = WorkList.pop_back_val(); 1930 1931 // If a user is an AND, add all its users to the work list. We only 1932 // propagate "pred != null" condition through AND because it is only 1933 // correct to assume that all conditions of AND are met in true branch. 1934 // TODO: Support similar logic of OR and EQ predicate? 1935 if (Pred == ICmpInst::ICMP_NE) 1936 if (auto *BO = dyn_cast<BinaryOperator>(Curr)) 1937 if (BO->getOpcode() == Instruction::And) { 1938 for (auto *BOU : BO->users()) 1939 if (Visited.insert(BOU).second) 1940 WorkList.push_back(BOU); 1941 continue; 1942 } 1943 1944 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) { 1945 assert(BI->isConditional() && "uses a comparison!"); 1946 1947 BasicBlock *NonNullSuccessor = 1948 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 
1 : 0);
1949           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1950           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1951             return true;
1952         } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
1953                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
1954           return true;
1955         }
1956       }
1957     }
1958   }
1959
1960   return false;
1961 }
1962
1963 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1964 /// ensure that the value it's attached to is never Value? 'RangeType' is
1965 /// the type of the value described by the range.
1966 static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value) {
1967   const unsigned NumRanges = Ranges->getNumOperands() / 2;
1968   assert(NumRanges >= 1);
1969   for (unsigned i = 0; i < NumRanges; ++i) {
1970     ConstantInt *Lower =
1971         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1972     ConstantInt *Upper =
1973         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1974     ConstantRange Range(Lower->getValue(), Upper->getValue());
1975     if (Range.contains(Value))
1976       return false;
1977   }
1978   return true;
1979 }
1980
1981 /// Return true if the given value is known to be non-zero when defined. For
1982 /// vectors, return true if every element is known to be non-zero when
1983 /// defined. For pointers, if the context instruction and dominator tree are
1984 /// specified, perform context-sensitive analysis and return true if the
1985 /// pointer couldn't possibly be null at the specified instruction.
1986 /// Supports values with integer or pointer type and vectors of integers.
1987 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1988   if (auto *C = dyn_cast<Constant>(V)) {
1989     if (C->isNullValue())
1990       return false;
1991     if (isa<ConstantInt>(C))
1992       // Must be non-zero due to null test above.
1993       return true;
1994
1995     // For constant vectors, check that all elements are undefined or known
1996     // non-zero to determine that the whole vector is known non-zero.
1997     if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1998       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1999         Constant *Elt = C->getAggregateElement(i);
2000         if (!Elt || Elt->isNullValue())
2001           return false;
2002         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2003           return false;
2004       }
2005       return true;
2006     }
2007
2008     // A global variable in address space 0 is non-null unless it is extern
2009     // weak or an absolute symbol reference. Other address spaces may have
2010     // null as a valid address for a global, so we can't assume anything.
2011     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2012       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2013           GV->getType()->getAddressSpace() == 0)
2014         return true;
2015     } else
2016       return false;
2017   }
2018
2019   if (auto *I = dyn_cast<Instruction>(V)) {
2020     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2021       // If the possible ranges don't contain zero, then the value is
2022       // definitely non-zero.
2023       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2024         const APInt ZeroValue(Ty->getBitWidth(), 0);
2025         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2026           return true;
2027       }
2028     }
2029   }
2030
2031   // Some of the tests below are recursive, so bail out if we hit the limit.
2032   if (Depth++ >= MaxDepth)
2033     return false;
2034
2035   // Check for pointer simplifications.
2036   if (V->getType()->isPointerTy()) {
2037     // Alloca never returns null, malloc might.
2038 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0) 2039 return true; 2040 2041 // A byval, inalloca, or nonnull argument is never null. 2042 if (const Argument *A = dyn_cast<Argument>(V)) 2043 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr()) 2044 return true; 2045 2046 // A Load tagged with nonnull metadata is never null. 2047 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 2048 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull)) 2049 return true; 2050 2051 if (const auto *Call = dyn_cast<CallBase>(V)) { 2052 if (Call->isReturnNonNull()) 2053 return true; 2054 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call)) 2055 return isKnownNonZero(RP, Depth, Q); 2056 } 2057 } 2058 2059 2060 // Check for recursive pointer simplifications. 2061 if (V->getType()->isPointerTy()) { 2062 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) 2063 return true; 2064 2065 // Look through bitcast operations, GEPs, and int2ptr instructions as they 2066 // do not alter the value, or at least not the nullness property of the 2067 // value, e.g., int2ptr is allowed to zero/sign extend the value. 2068 // 2069 // Note that we have to take special care to avoid looking through 2070 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well 2071 // as casts that can alter the value, e.g., AddrSpaceCasts. 2072 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 2073 if (isGEPKnownNonNull(GEP, Depth, Q)) 2074 return true; 2075 2076 if (auto *BCO = dyn_cast<BitCastOperator>(V)) 2077 return isKnownNonZero(BCO->getOperand(0), Depth, Q); 2078 2079 if (auto *I2P = dyn_cast<IntToPtrInst>(V)) 2080 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <= 2081 Q.DL.getTypeSizeInBits(I2P->getDestTy())) 2082 return isKnownNonZero(I2P->getOperand(0), Depth, Q); 2083 } 2084 2085 // Similar to int2ptr above, we can look through ptr2int here if the cast 2086 // is a no-op or an extend and not a truncate. 2087 if (auto *P2I = dyn_cast<PtrToIntInst>(V)) 2088 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <= 2089 Q.DL.getTypeSizeInBits(P2I->getDestTy())) 2090 return isKnownNonZero(P2I->getOperand(0), Depth, Q); 2091 2092 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); 2093 2094 // X | Y != 0 if X != 0 or Y != 0. 2095 Value *X = nullptr, *Y = nullptr; 2096 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 2097 return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q); 2098 2099 // ext X != 0 if X != 0. 2100 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 2101 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); 2102 2103 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 2104 // if the lowest bit is shifted off the end. 2105 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) { 2106 // shl nuw can't remove any non-zero bits. 2107 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2108 if (Q.IIQ.hasNoUnsignedWrap(BO)) 2109 return isKnownNonZero(X, Depth, Q); 2110 2111 KnownBits Known(BitWidth); 2112 computeKnownBits(X, Known, Depth, Q); 2113 if (Known.One[0]) 2114 return true; 2115 } 2116 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 2117 // defined if the sign bit is shifted off the end. 2118 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 2119 // shr exact can only shift out zero bits. 
2120 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 2121 if (BO->isExact()) 2122 return isKnownNonZero(X, Depth, Q); 2123 2124 KnownBits Known = computeKnownBits(X, Depth, Q); 2125 if (Known.isNegative()) 2126 return true; 2127 2128 // If the shifter operand is a constant, and all of the bits shifted 2129 // out are known to be zero, and X is known non-zero then at least one 2130 // non-zero bit must remain. 2131 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 2132 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 2133 // Is there a known one in the portion not shifted out? 2134 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal) 2135 return true; 2136 // Are all the bits to be shifted out known zero? 2137 if (Known.countMinTrailingZeros() >= ShiftVal) 2138 return isKnownNonZero(X, Depth, Q); 2139 } 2140 } 2141 // div exact can only produce a zero if the dividend is zero. 2142 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 2143 return isKnownNonZero(X, Depth, Q); 2144 } 2145 // X + Y. 2146 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2147 KnownBits XKnown = computeKnownBits(X, Depth, Q); 2148 KnownBits YKnown = computeKnownBits(Y, Depth, Q); 2149 2150 // If X and Y are both non-negative (as signed values) then their sum is not 2151 // zero unless both X and Y are zero. 2152 if (XKnown.isNonNegative() && YKnown.isNonNegative()) 2153 if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q)) 2154 return true; 2155 2156 // If X and Y are both negative (as signed values) then their sum is not 2157 // zero unless both X and Y equal INT_MIN. 2158 if (XKnown.isNegative() && YKnown.isNegative()) { 2159 APInt Mask = APInt::getSignedMaxValue(BitWidth); 2160 // The sign bit of X is set. If some other bit is set then X is not equal 2161 // to INT_MIN. 2162 if (XKnown.One.intersects(Mask)) 2163 return true; 2164 // The sign bit of Y is set. If some other bit is set then Y is not equal 2165 // to INT_MIN. 2166 if (YKnown.One.intersects(Mask)) 2167 return true; 2168 } 2169 2170 // The sum of a non-negative number and a power of two is not zero. 2171 if (XKnown.isNonNegative() && 2172 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) 2173 return true; 2174 if (YKnown.isNonNegative() && 2175 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) 2176 return true; 2177 } 2178 // X * Y. 2179 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 2180 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2181 // If X and Y are non-zero then so is X * Y as long as the multiplication 2182 // does not overflow. 2183 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) && 2184 isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q)) 2185 return true; 2186 } 2187 // (C ? X : Y) != 0 if X != 0 and Y != 0. 2188 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 2189 if (isKnownNonZero(SI->getTrueValue(), Depth, Q) && 2190 isKnownNonZero(SI->getFalseValue(), Depth, Q)) 2191 return true; 2192 } 2193 // PHI 2194 else if (const PHINode *PN = dyn_cast<PHINode>(V)) { 2195 // Try and detect a recurrence that monotonically increases from a 2196 // starting value, as these are common as induction variables. 
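    // An illustrative IR shape this matches (names hypothetical):
    //   %iv = phi i32 [ 1, %entry ], [ %iv.next, %loop ]
    //   %iv.next = add nuw i32 %iv, 1
    // The start is a positive constant and every step adds a non-negative
    // constant without wrapping, so the PHI can never become zero.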
2197 if (PN->getNumIncomingValues() == 2) { 2198 Value *Start = PN->getIncomingValue(0); 2199 Value *Induction = PN->getIncomingValue(1); 2200 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start)) 2201 std::swap(Start, Induction); 2202 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) { 2203 if (!C->isZero() && !C->isNegative()) { 2204 ConstantInt *X; 2205 if (Q.IIQ.UseInstrInfo && 2206 (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) || 2207 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) && 2208 !X->isNegative()) 2209 return true; 2210 } 2211 } 2212 } 2213 // Check if all incoming values are non-zero constant. 2214 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) { 2215 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero(); 2216 }); 2217 if (AllNonZeroConstants) 2218 return true; 2219 } 2220 2221 KnownBits Known(BitWidth); 2222 computeKnownBits(V, Known, Depth, Q); 2223 return Known.One != 0; 2224 } 2225 2226 /// Return true if V2 == V1 + X, where X is known non-zero. 2227 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { 2228 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 2229 if (!BO || BO->getOpcode() != Instruction::Add) 2230 return false; 2231 Value *Op = nullptr; 2232 if (V2 == BO->getOperand(0)) 2233 Op = BO->getOperand(1); 2234 else if (V2 == BO->getOperand(1)) 2235 Op = BO->getOperand(0); 2236 else 2237 return false; 2238 return isKnownNonZero(Op, 0, Q); 2239 } 2240 2241 /// Return true if it is known that V1 != V2. 2242 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) { 2243 if (V1 == V2) 2244 return false; 2245 if (V1->getType() != V2->getType()) 2246 // We can't look through casts yet. 2247 return false; 2248 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q)) 2249 return true; 2250 2251 if (V1->getType()->isIntOrIntVectorTy()) { 2252 // Are any known bits in V1 contradictory to known bits in V2? If V1 2253 // has a known zero where V2 has a known one, they must not be equal. 2254 KnownBits Known1 = computeKnownBits(V1, 0, Q); 2255 KnownBits Known2 = computeKnownBits(V2, 0, Q); 2256 2257 if (Known1.Zero.intersects(Known2.One) || 2258 Known2.Zero.intersects(Known1.One)) 2259 return true; 2260 } 2261 return false; 2262 } 2263 2264 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 2265 /// simplify operations downstream. Mask is known to be zero for bits that V 2266 /// cannot have. 2267 /// 2268 /// This function is defined on values with integer type, values with pointer 2269 /// type, and vectors of integers. In the case 2270 /// where V is a vector, the mask, known zero, and known one values are the 2271 /// same width as the vector element, and the bit is set only if it is true 2272 /// for all of the elements in the vector. 2273 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, 2274 const Query &Q) { 2275 KnownBits Known(Mask.getBitWidth()); 2276 computeKnownBits(V, Known, Depth, Q); 2277 return Mask.isSubsetOf(Known.Zero); 2278 } 2279 2280 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow). 2281 // Returns the input and lower/upper bounds. 
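// For example, smax(smin(%x, i8 100), i8 -50) clamps %x to [-50, 100]; here
// In is %x, CLow is -50, and CHigh is 100.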
2282 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, 2283 const APInt *&CLow, const APInt *&CHigh) { 2284 assert(isa<Operator>(Select) && 2285 cast<Operator>(Select)->getOpcode() == Instruction::Select && 2286 "Input should be a Select!"); 2287 2288 const Value *LHS, *RHS, *LHS2, *RHS2; 2289 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor; 2290 if (SPF != SPF_SMAX && SPF != SPF_SMIN) 2291 return false; 2292 2293 if (!match(RHS, m_APInt(CLow))) 2294 return false; 2295 2296 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor; 2297 if (getInverseMinMaxFlavor(SPF) != SPF2) 2298 return false; 2299 2300 if (!match(RHS2, m_APInt(CHigh))) 2301 return false; 2302 2303 if (SPF == SPF_SMIN) 2304 std::swap(CLow, CHigh); 2305 2306 In = LHS2; 2307 return CLow->sle(*CHigh); 2308 } 2309 2310 /// For vector constants, loop over the elements and find the constant with the 2311 /// minimum number of sign bits. Return 0 if the value is not a vector constant 2312 /// or if any element was not analyzed; otherwise, return the count for the 2313 /// element with the minimum number of sign bits. 2314 static unsigned computeNumSignBitsVectorConstant(const Value *V, 2315 unsigned TyBits) { 2316 const auto *CV = dyn_cast<Constant>(V); 2317 if (!CV || !CV->getType()->isVectorTy()) 2318 return 0; 2319 2320 unsigned MinSignBits = TyBits; 2321 unsigned NumElts = CV->getType()->getVectorNumElements(); 2322 for (unsigned i = 0; i != NumElts; ++i) { 2323 // If we find a non-ConstantInt, bail out. 2324 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); 2325 if (!Elt) 2326 return 0; 2327 2328 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); 2329 } 2330 2331 return MinSignBits; 2332 } 2333 2334 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, 2335 const Query &Q); 2336 2337 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth, 2338 const Query &Q) { 2339 unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q); 2340 assert(Result > 0 && "At least one sign bit needs to be present!"); 2341 return Result; 2342 } 2343 2344 /// Return the number of times the sign bit of the register is replicated into 2345 /// the other bits. We know that at least 1 bit is always equal to the sign bit 2346 /// (itself), but other cases can give us information. For example, immediately 2347 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each 2348 /// other, so we return 3. For vectors, return the number of sign bits for the 2349 /// vector element with the minimum number of known sign bits. 2350 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth, 2351 const Query &Q) { 2352 assert(Depth <= MaxDepth && "Limit Search Depth"); 2353 2354 // We return the minimum number of sign bits that are guaranteed to be present 2355 // in V, so for undef we have to conservatively return 1. We don't have the 2356 // same behavior for poison though -- that's a FIXME today. 2357 2358 Type *ScalarTy = V->getType()->getScalarType(); 2359 unsigned TyBits = ScalarTy->isPointerTy() ? 2360 Q.DL.getIndexTypeSizeInBits(ScalarTy) : 2361 Q.DL.getTypeSizeInBits(ScalarTy); 2362 2363 unsigned Tmp, Tmp2; 2364 unsigned FirstAnswer = 1; 2365 2366 // Note that ConstantInt is handled by the general computeKnownBits case 2367 // below. 2368 2369 if (Depth == MaxDepth) 2370 return 1; // Limit search depth. 
2371
2372   const Operator *U = dyn_cast<Operator>(V);
2373   switch (Operator::getOpcode(V)) {
2374   default: break;
2375   case Instruction::SExt:
2376     Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2377     return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2378
2379   case Instruction::SDiv: {
2380     const APInt *Denominator;
2381     // sdiv X, C -> adds log(C) sign bits.
2382     if (match(U->getOperand(1), m_APInt(Denominator))) {
2383
2384       // Ignore non-positive denominator.
2385       if (!Denominator->isStrictlyPositive())
2386         break;
2387
2388       // Calculate the incoming numerator bits.
2389       unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2390
2391       // Add floor(log(C)) bits to the numerator bits.
2392       return std::min(TyBits, NumBits + Denominator->logBase2());
2393     }
2394     break;
2395   }
2396
2397   case Instruction::SRem: {
2398     const APInt *Denominator;
2399     // srem X, C -> we know that the result is within [-C+1,C) when C is a
2400     // positive constant. This lets us put a lower bound on the number of sign
2401     // bits.
2402     if (match(U->getOperand(1), m_APInt(Denominator))) {
2403
2404       // Ignore non-positive denominator.
2405       if (!Denominator->isStrictlyPositive())
2406         break;
2407
2408       // Calculate the incoming numerator bits. SRem by a positive constant
2409       // can't lower the number of sign bits.
2410       unsigned NumrBits =
2411           ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2412
2413       // Calculate the leading sign bit constraints by examining the
2414       // denominator. Given that the denominator is positive, there are two
2415       // cases:
2416       //
2417       //  1. The numerator is positive. The result range is [0,C) and [0,C) u<
2418       //     (1 << ceilLogBase2(C)).
2419       //
2420       //  2. The numerator is negative. Then the result range is (-C,0] and
2421       //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2422       //
2423       // Thus a lower bound on the number of sign bits is `TyBits -
2424       // ceilLogBase2(C)`.
2425
2426       unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2427       return std::max(NumrBits, ResBits);
2428     }
2429     break;
2430   }
2431
2432   case Instruction::AShr: {
2433     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2434     // ashr X, C -> adds C sign bits. Vectors too.
2435     const APInt *ShAmt;
2436     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2437       if (ShAmt->uge(TyBits))
2438         break; // Bad shift.
2439       unsigned ShAmtLimited = ShAmt->getZExtValue();
2440       Tmp += ShAmtLimited;
2441       if (Tmp > TyBits) Tmp = TyBits;
2442     }
2443     return Tmp;
2444   }
2445   case Instruction::Shl: {
2446     const APInt *ShAmt;
2447     if (match(U->getOperand(1), m_APInt(ShAmt))) {
2448       // shl destroys sign bits.
2449       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2450       if (ShAmt->uge(TyBits) ||   // Bad shift.
2451           ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2452       Tmp2 = ShAmt->getZExtValue();
2453       return Tmp - Tmp2;
2454     }
2455     break;
2456   }
2457   case Instruction::And:
2458   case Instruction::Or:
2459   case Instruction::Xor: // NOT is handled here.
2460     // Logical binary ops preserve the number of sign bits at the worst.
2461     Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2462     if (Tmp != 1) {
2463       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2464       FirstAnswer = std::min(Tmp, Tmp2);
2465       // We computed what we know about the sign bits as our first
2466       // answer. Now proceed to the generic code that uses
2467       // computeKnownBits, and pick whichever answer is better.
2468 } 2469 break; 2470 2471 case Instruction::Select: { 2472 // If we have a clamp pattern, we know that the number of sign bits will be 2473 // the minimum of the clamp min/max range. 2474 const Value *X; 2475 const APInt *CLow, *CHigh; 2476 if (isSignedMinMaxClamp(U, X, CLow, CHigh)) 2477 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); 2478 2479 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2480 if (Tmp == 1) break; 2481 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 2482 return std::min(Tmp, Tmp2); 2483 } 2484 2485 case Instruction::Add: 2486 // Add can have at most one carry bit. Thus we know that the output 2487 // is, at worst, one more bit than the inputs. 2488 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2489 if (Tmp == 1) break; 2490 2491 // Special case decrementing a value (ADD X, -1): 2492 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 2493 if (CRHS->isAllOnesValue()) { 2494 KnownBits Known(TyBits); 2495 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 2496 2497 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2498 // sign bits set. 2499 if ((Known.Zero | 1).isAllOnesValue()) 2500 return TyBits; 2501 2502 // If we are subtracting one from a positive number, there is no carry 2503 // out of the result. 2504 if (Known.isNonNegative()) 2505 return Tmp; 2506 } 2507 2508 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2509 if (Tmp2 == 1) break; 2510 return std::min(Tmp, Tmp2)-1; 2511 2512 case Instruction::Sub: 2513 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2514 if (Tmp2 == 1) break; 2515 2516 // Handle NEG. 2517 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2518 if (CLHS->isNullValue()) { 2519 KnownBits Known(TyBits); 2520 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 2521 // If the input is known to be 0 or 1, the output is 0/-1, which is all 2522 // sign bits set. 2523 if ((Known.Zero | 1).isAllOnesValue()) 2524 return TyBits; 2525 2526 // If the input is known to be positive (the sign bit is known clear), 2527 // the output of the NEG has the same number of sign bits as the input. 2528 if (Known.isNonNegative()) 2529 return Tmp2; 2530 2531 // Otherwise, we treat this like a SUB. 2532 } 2533 2534 // Sub can have at most one carry bit. Thus we know that the output 2535 // is, at worst, one more bit than the inputs. 2536 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2537 if (Tmp == 1) break; 2538 return std::min(Tmp, Tmp2)-1; 2539 2540 case Instruction::Mul: { 2541 // The output of the Mul can be at most twice the valid bits in the inputs. 2542 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2543 if (SignBitsOp0 == 1) break; 2544 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2545 if (SignBitsOp1 == 1) break; 2546 unsigned OutValidBits = 2547 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); 2548 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; 2549 } 2550 2551 case Instruction::PHI: { 2552 const PHINode *PN = cast<PHINode>(U); 2553 unsigned NumIncomingValues = PN->getNumIncomingValues(); 2554 // Don't analyze large in-degree PHIs. 2555 if (NumIncomingValues > 4) break; 2556 // Unreachable blocks may have zero-operand PHI nodes. 2557 if (NumIncomingValues == 0) break; 2558 2559 // Take the minimum of all incoming values. This can't infinitely loop 2560 // because of our depth threshold. 
2561 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q); 2562 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { 2563 if (Tmp == 1) return Tmp; 2564 Tmp = std::min( 2565 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q)); 2566 } 2567 return Tmp; 2568 } 2569 2570 case Instruction::Trunc: 2571 // FIXME: it's tricky to do anything useful for this, but it is an important 2572 // case for targets like X86. 2573 break; 2574 2575 case Instruction::ExtractElement: 2576 // Look through extract element. At the moment we keep this simple and skip 2577 // tracking the specific element. But at least we might find information 2578 // valid for all elements of the vector (for example if vector is sign 2579 // extended, shifted, etc). 2580 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2581 2582 case Instruction::ShuffleVector: { 2583 // TODO: This is copied almost directly from the SelectionDAG version of 2584 // ComputeNumSignBits. It would be better if we could share common 2585 // code. If not, make sure that changes are translated to the DAG. 2586 2587 // Collect the minimum number of sign bits that are shared by every vector 2588 // element referenced by the shuffle. 2589 auto *Shuf = cast<ShuffleVectorInst>(U); 2590 int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements(); 2591 int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements(); 2592 APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0); 2593 for (int i = 0; i != NumMaskElts; ++i) { 2594 int M = Shuf->getMaskValue(i); 2595 assert(M < NumElts * 2 && "Invalid shuffle mask constant"); 2596 // For undef elements, we don't know anything about the common state of 2597 // the shuffle result. 2598 if (M == -1) 2599 return 1; 2600 if (M < NumElts) 2601 DemandedLHS.setBit(M % NumElts); 2602 else 2603 DemandedRHS.setBit(M % NumElts); 2604 } 2605 Tmp = std::numeric_limits<unsigned>::max(); 2606 if (!!DemandedLHS) 2607 Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q); 2608 if (!!DemandedRHS) { 2609 Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q); 2610 Tmp = std::min(Tmp, Tmp2); 2611 } 2612 // If we don't know anything, early out and try computeKnownBits fall-back. 2613 if (Tmp == 1) 2614 break; 2615 assert(Tmp <= V->getType()->getScalarSizeInBits() && 2616 "Failed to determine minimum sign bits"); 2617 return Tmp; 2618 } 2619 } 2620 2621 // Finally, if we can prove that the top bits of the result are 0's or 1's, 2622 // use this information. 2623 2624 // If we can examine all elements of a vector constant successfully, we're 2625 // done (we can't do any better than that). If not, keep trying. 2626 if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits)) 2627 return VecSignBits; 2628 2629 KnownBits Known(TyBits); 2630 computeKnownBits(V, Known, Depth, Q); 2631 2632 // If we know that the sign bit is either zero or one, determine the number of 2633 // identical bits in the top of the input value. 2634 return std::max(FirstAnswer, Known.countMinSignBits()); 2635 } 2636 2637 /// This function computes the integer multiple of Base that equals V. 2638 /// If successful, it returns true and returns the multiple in 2639 /// Multiple. If unsuccessful, it returns false. It looks 2640 /// through SExt instructions only if LookThroughSExt is true. 
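/// For example (illustrative), with Base == 4 a constant V of 12 gives
/// Multiple == 3, and V == (X << 2) gives Multiple == X.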
2641 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple, 2642 bool LookThroughSExt, unsigned Depth) { 2643 const unsigned MaxDepth = 6; 2644 2645 assert(V && "No Value?"); 2646 assert(Depth <= MaxDepth && "Limit Search Depth"); 2647 assert(V->getType()->isIntegerTy() && "Not integer or pointer type!"); 2648 2649 Type *T = V->getType(); 2650 2651 ConstantInt *CI = dyn_cast<ConstantInt>(V); 2652 2653 if (Base == 0) 2654 return false; 2655 2656 if (Base == 1) { 2657 Multiple = V; 2658 return true; 2659 } 2660 2661 ConstantExpr *CO = dyn_cast<ConstantExpr>(V); 2662 Constant *BaseVal = ConstantInt::get(T, Base); 2663 if (CO && CO == BaseVal) { 2664 // Multiple is 1. 2665 Multiple = ConstantInt::get(T, 1); 2666 return true; 2667 } 2668 2669 if (CI && CI->getZExtValue() % Base == 0) { 2670 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base); 2671 return true; 2672 } 2673 2674 if (Depth == MaxDepth) return false; // Limit search depth. 2675 2676 Operator *I = dyn_cast<Operator>(V); 2677 if (!I) return false; 2678 2679 switch (I->getOpcode()) { 2680 default: break; 2681 case Instruction::SExt: 2682 if (!LookThroughSExt) return false; 2683 // otherwise fall through to ZExt 2684 LLVM_FALLTHROUGH; 2685 case Instruction::ZExt: 2686 return ComputeMultiple(I->getOperand(0), Base, Multiple, 2687 LookThroughSExt, Depth+1); 2688 case Instruction::Shl: 2689 case Instruction::Mul: { 2690 Value *Op0 = I->getOperand(0); 2691 Value *Op1 = I->getOperand(1); 2692 2693 if (I->getOpcode() == Instruction::Shl) { 2694 ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1); 2695 if (!Op1CI) return false; 2696 // Turn Op0 << Op1 into Op0 * 2^Op1 2697 APInt Op1Int = Op1CI->getValue(); 2698 uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1); 2699 APInt API(Op1Int.getBitWidth(), 0); 2700 API.setBit(BitToSet); 2701 Op1 = ConstantInt::get(V->getContext(), API); 2702 } 2703 2704 Value *Mul0 = nullptr; 2705 if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) { 2706 if (Constant *Op1C = dyn_cast<Constant>(Op1)) 2707 if (Constant *MulC = dyn_cast<Constant>(Mul0)) { 2708 if (Op1C->getType()->getPrimitiveSizeInBits() < 2709 MulC->getType()->getPrimitiveSizeInBits()) 2710 Op1C = ConstantExpr::getZExt(Op1C, MulC->getType()); 2711 if (Op1C->getType()->getPrimitiveSizeInBits() > 2712 MulC->getType()->getPrimitiveSizeInBits()) 2713 MulC = ConstantExpr::getZExt(MulC, Op1C->getType()); 2714 2715 // V == Base * (Mul0 * Op1), so return (Mul0 * Op1) 2716 Multiple = ConstantExpr::getMul(MulC, Op1C); 2717 return true; 2718 } 2719 2720 if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0)) 2721 if (Mul0CI->getValue() == 1) { 2722 // V == Base * Op1, so return Op1 2723 Multiple = Op1; 2724 return true; 2725 } 2726 } 2727 2728 Value *Mul1 = nullptr; 2729 if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) { 2730 if (Constant *Op0C = dyn_cast<Constant>(Op0)) 2731 if (Constant *MulC = dyn_cast<Constant>(Mul1)) { 2732 if (Op0C->getType()->getPrimitiveSizeInBits() < 2733 MulC->getType()->getPrimitiveSizeInBits()) 2734 Op0C = ConstantExpr::getZExt(Op0C, MulC->getType()); 2735 if (Op0C->getType()->getPrimitiveSizeInBits() > 2736 MulC->getType()->getPrimitiveSizeInBits()) 2737 MulC = ConstantExpr::getZExt(MulC, Op0C->getType()); 2738 2739 // V == Base * (Mul1 * Op0), so return (Mul1 * Op0) 2740 Multiple = ConstantExpr::getMul(MulC, Op0C); 2741 return true; 2742 } 2743 2744 if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1)) 2745 if (Mul1CI->getValue() == 1) { 2746 // V == Base * Op0, so 
return Op0 2747 Multiple = Op0; 2748 return true; 2749 } 2750 } 2751 } 2752 } 2753 2754 // We could not determine if V is a multiple of Base. 2755 return false; 2756 } 2757 2758 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS, 2759 const TargetLibraryInfo *TLI) { 2760 const Function *F = ICS.getCalledFunction(); 2761 if (!F) 2762 return Intrinsic::not_intrinsic; 2763 2764 if (F->isIntrinsic()) 2765 return F->getIntrinsicID(); 2766 2767 if (!TLI) 2768 return Intrinsic::not_intrinsic; 2769 2770 LibFunc Func; 2771 // We're going to make assumptions on the semantics of the functions, check 2772 // that the target knows that it's available in this environment and it does 2773 // not have local linkage. 2774 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func)) 2775 return Intrinsic::not_intrinsic; 2776 2777 if (!ICS.onlyReadsMemory()) 2778 return Intrinsic::not_intrinsic; 2779 2780 // Otherwise check if we have a call to a function that can be turned into a 2781 // vector intrinsic. 2782 switch (Func) { 2783 default: 2784 break; 2785 case LibFunc_sin: 2786 case LibFunc_sinf: 2787 case LibFunc_sinl: 2788 return Intrinsic::sin; 2789 case LibFunc_cos: 2790 case LibFunc_cosf: 2791 case LibFunc_cosl: 2792 return Intrinsic::cos; 2793 case LibFunc_exp: 2794 case LibFunc_expf: 2795 case LibFunc_expl: 2796 return Intrinsic::exp; 2797 case LibFunc_exp2: 2798 case LibFunc_exp2f: 2799 case LibFunc_exp2l: 2800 return Intrinsic::exp2; 2801 case LibFunc_log: 2802 case LibFunc_logf: 2803 case LibFunc_logl: 2804 return Intrinsic::log; 2805 case LibFunc_log10: 2806 case LibFunc_log10f: 2807 case LibFunc_log10l: 2808 return Intrinsic::log10; 2809 case LibFunc_log2: 2810 case LibFunc_log2f: 2811 case LibFunc_log2l: 2812 return Intrinsic::log2; 2813 case LibFunc_fabs: 2814 case LibFunc_fabsf: 2815 case LibFunc_fabsl: 2816 return Intrinsic::fabs; 2817 case LibFunc_fmin: 2818 case LibFunc_fminf: 2819 case LibFunc_fminl: 2820 return Intrinsic::minnum; 2821 case LibFunc_fmax: 2822 case LibFunc_fmaxf: 2823 case LibFunc_fmaxl: 2824 return Intrinsic::maxnum; 2825 case LibFunc_copysign: 2826 case LibFunc_copysignf: 2827 case LibFunc_copysignl: 2828 return Intrinsic::copysign; 2829 case LibFunc_floor: 2830 case LibFunc_floorf: 2831 case LibFunc_floorl: 2832 return Intrinsic::floor; 2833 case LibFunc_ceil: 2834 case LibFunc_ceilf: 2835 case LibFunc_ceill: 2836 return Intrinsic::ceil; 2837 case LibFunc_trunc: 2838 case LibFunc_truncf: 2839 case LibFunc_truncl: 2840 return Intrinsic::trunc; 2841 case LibFunc_rint: 2842 case LibFunc_rintf: 2843 case LibFunc_rintl: 2844 return Intrinsic::rint; 2845 case LibFunc_nearbyint: 2846 case LibFunc_nearbyintf: 2847 case LibFunc_nearbyintl: 2848 return Intrinsic::nearbyint; 2849 case LibFunc_round: 2850 case LibFunc_roundf: 2851 case LibFunc_roundl: 2852 return Intrinsic::round; 2853 case LibFunc_pow: 2854 case LibFunc_powf: 2855 case LibFunc_powl: 2856 return Intrinsic::pow; 2857 case LibFunc_sqrt: 2858 case LibFunc_sqrtf: 2859 case LibFunc_sqrtl: 2860 return Intrinsic::sqrt; 2861 } 2862 2863 return Intrinsic::not_intrinsic; 2864 } 2865 2866 /// Return true if we can prove that the specified FP value is never equal to 2867 /// -0.0. 2868 /// 2869 /// NOTE: this function will need to be revisited when we support non-default 2870 /// rounding modes! 
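/// For example, (fadd X, +0.0) can produce +0.0 but never -0.0 under the
/// default rounding mode, and sitofp/uitofp return +0.0 for a zero input.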
2871 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, 2872 unsigned Depth) { 2873 if (auto *CFP = dyn_cast<ConstantFP>(V)) 2874 return !CFP->getValueAPF().isNegZero(); 2875 2876 // Limit search depth. 2877 if (Depth == MaxDepth) 2878 return false; 2879 2880 auto *Op = dyn_cast<Operator>(V); 2881 if (!Op) 2882 return false; 2883 2884 // Check if the nsz fast-math flag is set. 2885 if (auto *FPO = dyn_cast<FPMathOperator>(Op)) 2886 if (FPO->hasNoSignedZeros()) 2887 return true; 2888 2889 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. 2890 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP()))) 2891 return true; 2892 2893 // sitofp and uitofp turn into +0.0 for zero. 2894 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) 2895 return true; 2896 2897 if (auto *Call = dyn_cast<CallInst>(Op)) { 2898 Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI); 2899 switch (IID) { 2900 default: 2901 break; 2902 // sqrt(-0.0) = -0.0, no other negative results are possible. 2903 case Intrinsic::sqrt: 2904 case Intrinsic::canonicalize: 2905 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1); 2906 // fabs(x) != -0.0 2907 case Intrinsic::fabs: 2908 return true; 2909 } 2910 } 2911 2912 return false; 2913 } 2914 2915 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a 2916 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign 2917 /// bit despite comparing equal. 2918 static bool cannotBeOrderedLessThanZeroImpl(const Value *V, 2919 const TargetLibraryInfo *TLI, 2920 bool SignBitOnly, 2921 unsigned Depth) { 2922 // TODO: This function does not do the right thing when SignBitOnly is true 2923 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform 2924 // which flips the sign bits of NaNs. See 2925 // https://llvm.org/bugs/show_bug.cgi?id=31702. 2926 2927 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) { 2928 return !CFP->getValueAPF().isNegative() || 2929 (!SignBitOnly && CFP->getValueAPF().isZero()); 2930 } 2931 2932 // Handle vector of constants. 2933 if (auto *CV = dyn_cast<Constant>(V)) { 2934 if (CV->getType()->isVectorTy()) { 2935 unsigned NumElts = CV->getType()->getVectorNumElements(); 2936 for (unsigned i = 0; i != NumElts; ++i) { 2937 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i)); 2938 if (!CFP) 2939 return false; 2940 if (CFP->getValueAPF().isNegative() && 2941 (SignBitOnly || !CFP->getValueAPF().isZero())) 2942 return false; 2943 } 2944 2945 // All non-negative ConstantFPs. 2946 return true; 2947 } 2948 } 2949 2950 if (Depth == MaxDepth) 2951 return false; // Limit search depth. 2952 2953 const Operator *I = dyn_cast<Operator>(V); 2954 if (!I) 2955 return false; 2956 2957 switch (I->getOpcode()) { 2958 default: 2959 break; 2960 // Unsigned integers are always nonnegative. 2961 case Instruction::UIToFP: 2962 return true; 2963 case Instruction::FMul: 2964 // x*x is always non-negative or a NaN. 
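    // (Without the no-NaNs flag, x*x may still be a NaN whose sign bit
    // happens to be set, e.g. from inf * 0, which is why SignBitOnly also
    // requires hasNoNaNs() below.)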
2965 if (I->getOperand(0) == I->getOperand(1) && 2966 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs())) 2967 return true; 2968 2969 LLVM_FALLTHROUGH; 2970 case Instruction::FAdd: 2971 case Instruction::FDiv: 2972 case Instruction::FRem: 2973 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2974 Depth + 1) && 2975 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2976 Depth + 1); 2977 case Instruction::Select: 2978 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 2979 Depth + 1) && 2980 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 2981 Depth + 1); 2982 case Instruction::FPExt: 2983 case Instruction::FPTrunc: 2984 // Widening/narrowing never change sign. 2985 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2986 Depth + 1); 2987 case Instruction::ExtractElement: 2988 // Look through extract element. At the moment we keep this simple and skip 2989 // tracking the specific element. But at least we might find information 2990 // valid for all elements of the vector. 2991 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 2992 Depth + 1); 2993 case Instruction::Call: 2994 const auto *CI = cast<CallInst>(I); 2995 Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI); 2996 switch (IID) { 2997 default: 2998 break; 2999 case Intrinsic::maxnum: 3000 return (isKnownNeverNaN(I->getOperand(0), TLI) && 3001 cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, 3002 SignBitOnly, Depth + 1)) || 3003 (isKnownNeverNaN(I->getOperand(1), TLI) && 3004 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, 3005 SignBitOnly, Depth + 1)); 3006 3007 case Intrinsic::maximum: 3008 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3009 Depth + 1) || 3010 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3011 Depth + 1); 3012 case Intrinsic::minnum: 3013 case Intrinsic::minimum: 3014 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3015 Depth + 1) && 3016 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3017 Depth + 1); 3018 case Intrinsic::exp: 3019 case Intrinsic::exp2: 3020 case Intrinsic::fabs: 3021 return true; 3022 3023 case Intrinsic::sqrt: 3024 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 3025 if (!SignBitOnly) 3026 return true; 3027 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 3028 CannotBeNegativeZero(CI->getOperand(0), TLI)); 3029 3030 case Intrinsic::powi: 3031 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 3032 // powi(x,n) is non-negative if n is even. 3033 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 3034 return true; 3035 } 3036 // TODO: This is not correct. Given that exp is an integer, here are the 3037 // ways that pow can return a negative value: 3038 // 3039 // pow(x, exp) --> negative if exp is odd and x is negative. 3040 // pow(-0, exp) --> -inf if exp is negative odd. 3041 // pow(-0, exp) --> -0 if exp is positive odd. 3042 // pow(-inf, exp) --> -0 if exp is negative odd. 3043 // pow(-inf, exp) --> -inf if exp is positive odd. 3044 // 3045 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 3046 // but we must return false if x == -0. Unfortunately we do not currently 3047 // have a way of expressing this constraint. See details in 3048 // https://llvm.org/bugs/show_bug.cgi?id=31702. 
3049 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3050 Depth + 1); 3051 3052 case Intrinsic::fma: 3053 case Intrinsic::fmuladd: 3054 // x*x+y is non-negative if y is non-negative. 3055 return I->getOperand(0) == I->getOperand(1) && 3056 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && 3057 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 3058 Depth + 1); 3059 } 3060 break; 3061 } 3062 return false; 3063 } 3064 3065 bool llvm::CannotBeOrderedLessThanZero(const Value *V, 3066 const TargetLibraryInfo *TLI) { 3067 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); 3068 } 3069 3070 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { 3071 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); 3072 } 3073 3074 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, 3075 unsigned Depth) { 3076 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type"); 3077 3078 // If we're told that NaNs won't happen, assume they won't. 3079 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) 3080 if (FPMathOp->hasNoNaNs()) 3081 return true; 3082 3083 // Handle scalar constants. 3084 if (auto *CFP = dyn_cast<ConstantFP>(V)) 3085 return !CFP->isNaN(); 3086 3087 if (Depth == MaxDepth) 3088 return false; 3089 3090 if (auto *Inst = dyn_cast<Instruction>(V)) { 3091 switch (Inst->getOpcode()) { 3092 case Instruction::FAdd: 3093 case Instruction::FMul: 3094 case Instruction::FSub: 3095 case Instruction::FDiv: 3096 case Instruction::FRem: { 3097 // TODO: Need isKnownNeverInfinity 3098 return false; 3099 } 3100 case Instruction::Select: { 3101 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && 3102 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1); 3103 } 3104 case Instruction::SIToFP: 3105 case Instruction::UIToFP: 3106 return true; 3107 case Instruction::FPTrunc: 3108 case Instruction::FPExt: 3109 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1); 3110 default: 3111 break; 3112 } 3113 } 3114 3115 if (const auto *II = dyn_cast<IntrinsicInst>(V)) { 3116 switch (II->getIntrinsicID()) { 3117 case Intrinsic::canonicalize: 3118 case Intrinsic::fabs: 3119 case Intrinsic::copysign: 3120 case Intrinsic::exp: 3121 case Intrinsic::exp2: 3122 case Intrinsic::floor: 3123 case Intrinsic::ceil: 3124 case Intrinsic::trunc: 3125 case Intrinsic::rint: 3126 case Intrinsic::nearbyint: 3127 case Intrinsic::round: 3128 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1); 3129 case Intrinsic::sqrt: 3130 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) && 3131 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI); 3132 default: 3133 return false; 3134 } 3135 } 3136 3137 // Bail out for constant expressions, but try to handle vector constants. 3138 if (!V->getType()->isVectorTy() || !isa<Constant>(V)) 3139 return false; 3140 3141 // For vectors, verify that each element is not NaN. 3142 unsigned NumElts = V->getType()->getVectorNumElements(); 3143 for (unsigned i = 0; i != NumElts; ++i) { 3144 Constant *Elt = cast<Constant>(V)->getAggregateElement(i); 3145 if (!Elt) 3146 return false; 3147 if (isa<UndefValue>(Elt)) 3148 continue; 3149 auto *CElt = dyn_cast<ConstantFP>(Elt); 3150 if (!CElt || CElt->isNaN()) 3151 return false; 3152 } 3153 // All elements were confirmed not-NaN or undefined. 3154 return true; 3155 } 3156 3157 Value *llvm::isBytewiseValue(Value *V) { 3158 3159 // All byte-wide stores are splatable, even of arbitrary variables. 
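  // A few illustrative results: an i32 0xAAAAAAAA yields i8 0xAA, a double
  // 0.0 yields i8 0, and an i32 0x01020304 has no byte-wise splat value and
  // yields nullptr.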
3160   if (V->getType()->isIntegerTy(8))
3161     return V;
3162 
3163   LLVMContext &Ctx = V->getContext();
3164 
3165   // Undef values don't constrain the byte; they match anything.
3166   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3167   if (isa<UndefValue>(V))
3168     return UndefInt8;
3169 
3170   Constant *C = dyn_cast<Constant>(V);
3171   if (!C) {
3172     // Conceptually, we could handle things like:
3173     //   %a = zext i8 %X to i16
3174     //   %b = shl i16 %a, 8
3175     //   %c = or i16 %a, %b
3176     // but until there is an example that actually needs this, it doesn't seem
3177     // worth worrying about.
3178     return nullptr;
3179   }
3180 
3181   // Handle 'null' constants such as ConstantAggregateZero.
3182   if (C->isNullValue())
3183     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3184 
3185   // Constant floating-point values can be handled as integer values if the
3186   // corresponding integer value is "byteable". An important case is 0.0.
3187   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3188     Type *Ty = nullptr;
3189     if (CFP->getType()->isHalfTy())
3190       Ty = Type::getInt16Ty(Ctx);
3191     else if (CFP->getType()->isFloatTy())
3192       Ty = Type::getInt32Ty(Ctx);
3193     else if (CFP->getType()->isDoubleTy())
3194       Ty = Type::getInt64Ty(Ctx);
3195     // Don't handle long double formats, which have strange constraints.
3196     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty)) : nullptr;
3197   }
3198 
3199   // We can handle constant integers whose width is a multiple of 8 bits.
3200   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3201     if (CI->getBitWidth() % 8 == 0) {
3202       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3203       if (!CI->getValue().isSplat(8))
3204         return nullptr;
3205       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3206     }
3207   }
3208 
3209   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3210     if (LHS == RHS)
3211       return LHS;
3212     if (!LHS || !RHS)
3213       return nullptr;
3214     if (LHS == UndefInt8)
3215       return RHS;
3216     if (RHS == UndefInt8)
3217       return LHS;
3218     return nullptr;
3219   };
3220 
3221   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3222     Value *Val = UndefInt8;
3223     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3224       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I)))))
3225         return nullptr;
3226     return Val;
3227   }
3228 
3229   if (isa<ConstantVector>(C)) {
3230     Constant *Splat = cast<ConstantVector>(C)->getSplatValue();
3231     return Splat ? isBytewiseValue(Splat) : nullptr;
3232   }
3233 
3234   if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
3235     Value *Val = UndefInt8;
3236     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3237       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I)))))
3238         return nullptr;
3239     return Val;
3240   }
3241 
3242   // Don't try to handle the handful of other constants.
3243   return nullptr;
3244 }
3245 
3246 // This is the recursive version of BuildSubAggregate. It takes a few different
3247 // arguments. Idxs is the index within the nested struct From that we are
3248 // looking at now (which is of type IndexedType). IdxSkip is the number of
3249 // indices from Idxs that should be left out when inserting into the resulting
3250 // struct. To is the result struct built so far; new insertvalue instructions
3251 // build on it.
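// For instance (illustrative), rebuilding { c, d } out of
// { a, { b, { c, d }, e } } reaches this function with Idxs = {1, 1} and
// IdxSkip = 2, so the values found at From[1,1,0] and From[1,1,1] are
// reinserted into To at indices {0} and {1} respectively.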
3252 static Value *BuildSubAggregate(Value *From, Value *To, Type *IndexedType,
3253                                 SmallVectorImpl<unsigned> &Idxs,
3254                                 unsigned IdxSkip,
3255                                 Instruction *InsertBefore) {
3256   StructType *STy = dyn_cast<StructType>(IndexedType);
3257   if (STy) {
3258     // Save the original To argument so we can modify it.
3259     Value *OrigTo = To;
3260     // General case, the type indexed by Idxs is a struct.
3261     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3262       // Process each struct element recursively.
3263       Idxs.push_back(i);
3264       Value *PrevTo = To;
3265       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3266                              InsertBefore);
3267       Idxs.pop_back();
3268       if (!To) {
3269         // Couldn't find any inserted value for this index? Clean up.
3270         while (PrevTo != OrigTo) {
3271           InsertValueInst *Del = cast<InsertValueInst>(PrevTo);
3272           PrevTo = Del->getAggregateOperand();
3273           Del->eraseFromParent();
3274         }
3275         // Stop processing elements.
3276         break;
3277       }
3278     }
3279     // If we successfully found a value for each of our subaggregates, return it.
3280     if (To)
3281       return To;
3282   }
3283   // Base case: the type indexed by Idxs is not a struct, or not all of
3284   // the struct's elements had a value that was inserted directly. In the latter
3285   // case, perhaps we can't determine each of the subelements individually, but
3286   // we might be able to find the complete struct somewhere.
3287 
3288   // Find the value that is at that particular spot.
3289   Value *V = FindInsertedValue(From, Idxs);
3290 
3291   if (!V)
3292     return nullptr;
3293 
3294   // Insert the value into the new (sub) aggregate.
3295   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3296                                  "tmp", InsertBefore);
3297 }
3298 
3299 // This helper takes a nested struct and extracts a part of it (which is again a
3300 // struct) into a new value. For example, given the struct:
3301 //   { a, { b, { c, d }, e } }
3302 // and the indices "1, 1" this returns
3303 //   { c, d }.
3304 //
3305 // It does this by inserting an insertvalue for each element in the resulting
3306 // struct, as opposed to just inserting a single struct. This will only work if
3307 // each of the elements of the substruct is known (i.e., inserted into From by
3308 // an insertvalue instruction somewhere).
3309 //
3310 // All inserted insertvalue instructions are inserted before InsertBefore.
3311 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3312                                 Instruction *InsertBefore) {
3313   assert(InsertBefore && "Must have someplace to insert!");
3314   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3315                                                        idx_range);
3316   Value *To = UndefValue::get(IndexedType);
3317   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3318   unsigned IdxSkip = Idxs.size();
3319 
3320   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3321 }
3322 
3323 /// Given an aggregate and a sequence of indices, see if the scalar value
3324 /// indexed is already around as a register, for example if it was inserted
3325 /// directly into the aggregate.
3326 ///
3327 /// If InsertBefore is not null, this function will duplicate (modified)
3328 /// insertvalues when a part of a nested struct is extracted.
3329 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3330                                Instruction *InsertBefore) {
3331   // Nothing to index? Just return V then (this is useful at the end of our
3332   // recursion).
3333   if (idx_range.empty())
3334     return V;
3335   // We have indices, so V should have an indexable type.
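  // (Illustrative: for %v = insertvalue {i32, i32} undef, i32 7, 1, querying
  // idx_range = {1} returns the inserted i32 7 directly, creating no new
  // instructions.)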
3336   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3337          "Not looking at a struct or array?");
3338   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3339          "Invalid indices for type?");
3340 
3341   if (Constant *C = dyn_cast<Constant>(V)) {
3342     C = C->getAggregateElement(idx_range[0]);
3343     if (!C) return nullptr;
3344     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3345   }
3346 
3347   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3348     // Loop over the indices of the insertvalue instruction in parallel with
3349     // the requested indices.
3350     const unsigned *req_idx = idx_range.begin();
3351     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3352          i != e; ++i, ++req_idx) {
3353       if (req_idx == idx_range.end()) {
3354         // We can't handle this without inserting insertvalues.
3355         if (!InsertBefore)
3356           return nullptr;
3357 
3358         // The requested index identifies a part of a nested aggregate. Handle
3359         // this specially. For example,
3360         //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3361         //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3362         //   %C = extractvalue {i32, { i32, i32 } } %B, 1
3363         // This can be changed into
3364         //   %A = insertvalue {i32, i32 } undef, i32 10, 0
3365         //   %C = insertvalue {i32, i32 } %A, i32 11, 1
3366         // which allows the unused 0,0 element from the nested struct to be
3367         // removed.
3368         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3369                                  InsertBefore);
3370       }
3371 
3372       // This insertvalue inserts something other than what we are looking
3373       // for. In that case, see if the (aggregate) value it was inserted into
3374       // has the value we are looking for.
3375       if (*req_idx != *i)
3376         return FindInsertedValue(I->getAggregateOperand(), idx_range,
3377                                  InsertBefore);
3378     }
3379     // If we end up here, the indices of the insertvalue match those requested
3380     // (though possibly only partially). Now we recursively look at the
3381     // inserted value, passing any remaining indices.
3382     return FindInsertedValue(I->getInsertedValueOperand(),
3383                              makeArrayRef(req_idx, idx_range.end()),
3384                              InsertBefore);
3385   }
3386 
3387   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3388     // If we're extracting a value from an aggregate that was itself extracted
3389     // from something else, we can extract from that something else directly.
3390     // However, we will need to chain I's indices with the requested indices.
3391 
3392     // Calculate the number of indices required.
3393     unsigned size = I->getNumIndices() + idx_range.size();
3394     // Allocate some space to put the new indices in.
3395     SmallVector<unsigned, 5> Idxs;
3396     Idxs.reserve(size);
3397     // Add indices from the extract value instruction.
3398     Idxs.append(I->idx_begin(), I->idx_end());
3399 
3400     // Add requested indices.
3401     Idxs.append(idx_range.begin(), idx_range.end());
3402 
3403     assert(Idxs.size() == size &&
3404            "Number of indices added not correct?");
3405 
3406     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3407   }
3408   // Otherwise, we don't know (e.g., when extracting from a function return
3409   // value or a load instruction).
3410   return nullptr;
3411 }
3412 
3413 /// Analyze the specified pointer to see if it can be expressed as a base
3414 /// pointer plus a constant offset. Return the base and offset to the caller.
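/// For example (illustrative IR), given
///   %p = getelementptr inbounds i32, i32* %base, i64 3
/// this returns %base and sets Offset to 12 (three 4-byte elements).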
3415 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3416                                               const DataLayout &DL) {
3417   unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3418   APInt ByteOffset(BitWidth, 0);
3419 
3420   // We walk up the defs but use a visited set to handle unreachable code. In
3421   // that case, we stop after accumulating the cycle once (not that it
3422   // matters).
3423   SmallPtrSet<Value *, 16> Visited;
3424   while (Visited.insert(Ptr).second) {
3425     if (Ptr->getType()->isVectorTy())
3426       break;
3427 
3428     if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3429       // If one of the values we have visited is an addrspacecast, then
3430       // the pointer type of this GEP may be different from the type
3431       // of the Ptr parameter which was passed to this function. This
3432       // means when we construct GEPOffset, we need to use the size
3433       // of GEP's pointer type rather than the size of the original
3434       // pointer type.
3435       APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3436       if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3437         break;
3438 
3439       APInt OrigByteOffset(ByteOffset);
3440       ByteOffset += GEPOffset.sextOrTrunc(ByteOffset.getBitWidth());
3441       if (ByteOffset.getMinSignedBits() > 64) {
3442         // Stop traversal if the pointer offset wouldn't fit into int64_t
3443         // (this should be removed if Offset is updated to an APInt).
3444         ByteOffset = OrigByteOffset;
3445         break;
3446       }
3447 
3448       Ptr = GEP->getPointerOperand();
3449     } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3450                Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3451       Ptr = cast<Operator>(Ptr)->getOperand(0);
3452     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3453       if (GA->isInterposable())
3454         break;
3455       Ptr = GA->getAliasee();
3456     } else {
3457       break;
3458     }
3459   }
3460   Offset = ByteOffset.getSExtValue();
3461   return Ptr;
3462 }
3463 
3464 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3465                                        unsigned CharSize) {
3466   // Make sure the GEP has exactly three arguments.
3467   if (GEP->getNumOperands() != 3)
3468     return false;
3469 
3470   // Make sure the index-ee is a pointer to an array of \p CharSize-wide
3471   // integers.
3472   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3473   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3474     return false;
3475 
3476   // Check to make sure that the first operand of the GEP is an integer and
3477   // has value 0 so that we are sure we're indexing into the initializer.
3478   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3479   if (!FirstIdx || !FirstIdx->isZero())
3480     return false;
3481 
3482   return true;
3483 }
3484 
3485 bool llvm::getConstantDataArrayInfo(const Value *V,
3486                                     ConstantDataArraySlice &Slice,
3487                                     unsigned ElementSize, uint64_t Offset) {
3488   assert(V);
3489 
3490   // Look through bitcast instructions and geps.
3491   V = V->stripPointerCasts();
3492 
3493   // If the value is a GEP instruction or constant expression, treat it as an
3494   // offset.
3495   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3496     // The GEP operator should be based on a pointer to a string constant,
3497     // and is indexing into that string constant.
3498     if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3499       return false;
3500 
3501     // If the second index isn't a ConstantInt, then this is a variable index
3502     // into the array. If this occurs, we can't say anything meaningful about
3503     // the string.
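    // (Illustrative: a constant second index such as "i64 3" merely shifts
    // the slice's starting offset below, while a variable index "%i" makes
    // us give up.)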
3504 uint64_t StartIdx = 0; 3505 if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2))) 3506 StartIdx = CI->getZExtValue(); 3507 else 3508 return false; 3509 return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize, 3510 StartIdx + Offset); 3511 } 3512 3513 // The GEP instruction, constant or instruction, must reference a global 3514 // variable that is a constant and is initialized. The referenced constant 3515 // initializer is the array that we'll use for optimization. 3516 const GlobalVariable *GV = dyn_cast<GlobalVariable>(V); 3517 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) 3518 return false; 3519 3520 const ConstantDataArray *Array; 3521 ArrayType *ArrayTy; 3522 if (GV->getInitializer()->isNullValue()) { 3523 Type *GVTy = GV->getValueType(); 3524 if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) { 3525 // A zeroinitializer for the array; there is no ConstantDataArray. 3526 Array = nullptr; 3527 } else { 3528 const DataLayout &DL = GV->getParent()->getDataLayout(); 3529 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy); 3530 uint64_t Length = SizeInBytes / (ElementSize / 8); 3531 if (Length <= Offset) 3532 return false; 3533 3534 Slice.Array = nullptr; 3535 Slice.Offset = 0; 3536 Slice.Length = Length - Offset; 3537 return true; 3538 } 3539 } else { 3540 // This must be a ConstantDataArray. 3541 Array = dyn_cast<ConstantDataArray>(GV->getInitializer()); 3542 if (!Array) 3543 return false; 3544 ArrayTy = Array->getType(); 3545 } 3546 if (!ArrayTy->getElementType()->isIntegerTy(ElementSize)) 3547 return false; 3548 3549 uint64_t NumElts = ArrayTy->getArrayNumElements(); 3550 if (Offset > NumElts) 3551 return false; 3552 3553 Slice.Array = Array; 3554 Slice.Offset = Offset; 3555 Slice.Length = NumElts - Offset; 3556 return true; 3557 } 3558 3559 /// This function computes the length of a null-terminated C string pointed to 3560 /// by V. If successful, it returns true and returns the string in Str. 3561 /// If unsuccessful, it returns false. 3562 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 3563 uint64_t Offset, bool TrimAtNul) { 3564 ConstantDataArraySlice Slice; 3565 if (!getConstantDataArrayInfo(V, Slice, 8, Offset)) 3566 return false; 3567 3568 if (Slice.Array == nullptr) { 3569 if (TrimAtNul) { 3570 Str = StringRef(); 3571 return true; 3572 } 3573 if (Slice.Length == 1) { 3574 Str = StringRef("", 1); 3575 return true; 3576 } 3577 // We cannot instantiate a StringRef as we do not have an appropriate string 3578 // of 0s at hand. 3579 return false; 3580 } 3581 3582 // Start out with the entire array in the StringRef. 3583 Str = Slice.Array->getAsString(); 3584 // Skip over 'offset' bytes. 3585 Str = Str.substr(Slice.Offset); 3586 3587 if (TrimAtNul) { 3588 // Trim off the \0 and anything after it. If the array is not nul 3589 // terminated, we just return the whole end of string. The client may know 3590 // some other way that the string is length-bound. 3591 Str = Str.substr(0, Str.find('\0')); 3592 } 3593 return true; 3594 } 3595 3596 // These next two are very similar to the above, but also look through PHI 3597 // nodes. 3598 // TODO: See if we can integrate these two together. 3599 3600 /// If we can compute the length of the string pointed to by 3601 /// the specified pointer, return 'len+1'. If we can't, return 0. 3602 static uint64_t GetStringLengthH(const Value *V, 3603 SmallPtrSetImpl<const PHINode*> &PHIs, 3604 unsigned CharSize) { 3605 // Look through noop bitcast instructions. 
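  // (Note on the sentinels used below: ~0ULL means "no length constraint yet"
  // while walking phis, 0 means "unknown"; both are distinct from any real
  // 'len+1' result.)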
3606   V = V->stripPointerCasts();
3607 
3608   // If this is a PHI node, there are two cases: either we have already seen it
3609   // or we haven't.
3610   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3611     if (!PHIs.insert(PN).second)
3612       return ~0ULL; // already in the set.
3613 
3614     // If it was new, see if all the input strings are the same length.
3615     uint64_t LenSoFar = ~0ULL;
3616     for (Value *IncValue : PN->incoming_values()) {
3617       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3618       if (Len == 0) return 0; // Unknown length -> unknown.
3619 
3620       if (Len == ~0ULL) continue;
3621 
3622       if (Len != LenSoFar && LenSoFar != ~0ULL)
3623         return 0; // Disagree -> unknown.
3624       LenSoFar = Len;
3625     }
3626 
3627     // Success, all agree.
3628     return LenSoFar;
3629   }
3630 
3631   // strlen(select(c,x,y)) -> strlen(x) if strlen(x) == strlen(y), else unknown.
3632   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3633     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3634     if (Len1 == 0) return 0;
3635     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3636     if (Len2 == 0) return 0;
3637     if (Len1 == ~0ULL) return Len2;
3638     if (Len2 == ~0ULL) return Len1;
3639     if (Len1 != Len2) return 0;
3640     return Len1;
3641   }
3642 
3643   // Otherwise, see if we can read the string.
3644   ConstantDataArraySlice Slice;
3645   if (!getConstantDataArrayInfo(V, Slice, CharSize))
3646     return 0;
3647 
3648   if (Slice.Array == nullptr)
3649     return 1;
3650 
3651   // Search for the first nul character.
3652   unsigned NullIndex = 0;
3653   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3654     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3655       break;
3656   }
3657 
3658   return NullIndex + 1;
3659 }
3660 
3661 /// If we can compute the length of the string pointed to by
3662 /// the specified pointer, return 'len+1'. If we can't, return 0.
3663 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3664   if (!V->getType()->isPointerTy())
3665     return 0;
3666 
3667   SmallPtrSet<const PHINode*, 32> PHIs;
3668   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3669   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
3670   // return 1, the length of an empty string (just the nul terminator).
3671   return Len == ~0ULL ? 1 : Len;
3672 }
3673 
3674 const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
3675   assert(Call &&
3676          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3677   if (const Value *RV = Call->getReturnedArgOperand())
3678     return RV;
3679   // This can be used only as an aliasing property.
3680   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
3681     return Call->getArgOperand(0);
3682   return nullptr;
3683 }
3684 
3685 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3686     const CallBase *Call) {
3687   return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3688          Call->getIntrinsicID() == Intrinsic::strip_invariant_group;
3689 }
3690 
3691 /// \p PN defines a loop-variant pointer to an object. Check if the
3692 /// previous iteration of the loop was referring to the same object as \p PN.
3693 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3694                                          const LoopInfo *LI) {
3695   // Find the loop-defined value.
3696   Loop *L = LI->getLoopFor(PN->getParent());
3697   if (PN->getNumIncomingValues() != 2)
3698     return true;
3699 
3700   // Find the value from the previous iteration.
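  // E.g. (illustrative) for a header phi
  //   %p = phi i32* [ %init, %preheader ], [ %p.next, %latch ]
  // the loop-defined incoming value is %p.next; %init comes from outside L.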
3701   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3702   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3703     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3704   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3705     return true;
3706 
3707   // If a new pointer is loaded in the loop, the pointer references a different
3708   // object in every iteration. E.g.:
3709   //   for (i)
3710   //     int *p = a[i];
3711   //   ...
3712   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3713     if (!L->isLoopInvariant(Load->getPointerOperand()))
3714       return false;
3715   return true;
3716 }
3717 
3718 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3719                                  unsigned MaxLookup) {
3720   if (!V->getType()->isPointerTy())
3721     return V;
3722   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3723     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3724       V = GEP->getPointerOperand();
3725     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3726                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3727       V = cast<Operator>(V)->getOperand(0);
3728     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3729       if (GA->isInterposable())
3730         return V;
3731       V = GA->getAliasee();
3732     } else if (isa<AllocaInst>(V)) {
3733       // An alloca can't be further simplified.
3734       return V;
3735     } else {
3736       if (auto *Call = dyn_cast<CallBase>(V)) {
3737         // CaptureTracking knows about special capturing properties of some
3738         // intrinsics, such as launder.invariant.group, that cannot be
3739         // expressed with attributes but still return a pointer aliasing
3740         // their argument. Some analyses assume that a nocapture pointer is
3741         // never returned from such an intrinsic (since the function would
3742         // have to be marked with the 'returned' attribute), so it is crucial
3743         // to use this helper and keep it in sync with CaptureTracking. Not
3744         // doing so may cause weird miscompilations where two aliasing
3745         // pointers are assumed not to alias.
3746         if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
3747           V = RP;
3748           continue;
3749         }
3750       }
3751 
3752       // See if InstructionSimplify knows any relevant tricks.
3753       if (Instruction *I = dyn_cast<Instruction>(V))
3754         // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3755         if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3756           V = Simplified;
3757           continue;
3758         }
3759 
3760       return V;
3761     }
3762     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3763   }
3764   return V;
3765 }
3766 
3767 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3768                                 const DataLayout &DL, LoopInfo *LI,
3769                                 unsigned MaxLookup) {
3770   SmallPtrSet<Value *, 4> Visited;
3771   SmallVector<Value *, 4> Worklist;
3772   Worklist.push_back(V);
3773   do {
3774     Value *P = Worklist.pop_back_val();
3775     P = GetUnderlyingObject(P, DL, MaxLookup);
3776 
3777     if (!Visited.insert(P).second)
3778       continue;
3779 
3780     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3781       Worklist.push_back(SI->getTrueValue());
3782       Worklist.push_back(SI->getFalseValue());
3783       continue;
3784     }
3785 
3786     if (PHINode *PN = dyn_cast<PHINode>(P)) {
3787       // If this PHI changes the underlying object in every iteration of the
3788       // loop, don't look through it. Consider:
3789       //   int **A;
3790       //   for (i) {
3791       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
3792       //     Curr = A[i];
3793       //     *Prev, *Curr;
3794       //   }
3795       // Prev is tracking Curr one iteration behind, so they refer to
3796       // different underlying objects.
3797       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3798           isSameUnderlyingObjectInLoop(PN, LI))
3799         for (Value *IncValue : PN->incoming_values())
3800           Worklist.push_back(IncValue);
3801       continue;
3802     }
3803 
3804     Objects.push_back(P);
3805   } while (!Worklist.empty());
3806 }
3807 
3808 /// This is the function that does the work of looking through basic
3809 /// ptrtoint+arithmetic+inttoptr sequences.
3810 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3811   do {
3812     if (const Operator *U = dyn_cast<Operator>(V)) {
3813       // If we find a ptrtoint, we can transfer control back to the
3814       // regular pointer-based GetUnderlyingObjects walk.
3815       if (U->getOpcode() == Instruction::PtrToInt)
3816         return U->getOperand(0);
3817       // If we find an add of a constant, a multiplied value, or a phi, it's
3818       // likely that the other operand will lead us to the base
3819       // object. We don't have to worry about the case where the
3820       // object address is somehow being computed by the multiply,
3821       // because our callers only care when the result is an
3822       // identifiable object.
3823       if (U->getOpcode() != Instruction::Add ||
3824           (!isa<ConstantInt>(U->getOperand(1)) &&
3825            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3826            !isa<PHINode>(U->getOperand(1))))
3827         return V;
3828       V = U->getOperand(0);
3829     } else {
3830       return V;
3831     }
3832     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3833   } while (true);
3834 }
3835 
3836 /// This is a wrapper around GetUnderlyingObjects that adds support for basic
3837 /// ptrtoint+arithmetic+inttoptr sequences.
3838 /// It returns false if an unidentified object is found in GetUnderlyingObjects.
3839 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3840                                           SmallVectorImpl<Value *> &Objects,
3841                                           const DataLayout &DL) {
3842   SmallPtrSet<const Value *, 16> Visited;
3843   SmallVector<const Value *, 4> Working(1, V);
3844   do {
3845     V = Working.pop_back_val();
3846 
3847     SmallVector<Value *, 4> Objs;
3848     GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3849 
3850     for (Value *V : Objs) {
3851       if (!Visited.insert(V).second)
3852         continue;
3853       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3854         const Value *O =
3855             getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3856         if (O->getType()->isPointerTy()) {
3857           Working.push_back(O);
3858           continue;
3859         }
3860       }
3861       // If GetUnderlyingObjects fails to find an identifiable object,
3862       // getUnderlyingObjectsForCodeGen also fails for safety.
3863       if (!isIdentifiedObject(V)) {
3864         Objects.clear();
3865         return false;
3866       }
3867       Objects.push_back(const_cast<Value *>(V));
3868     }
3869   } while (!Working.empty());
3870   return true;
3871 }
3872 
3873 /// Return true if the only users of this pointer are lifetime markers.
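/// (Illustrative: an alloca whose every user is an llvm.lifetime.start or
/// llvm.lifetime.end intrinsic satisfies this.)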
3874 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 3875 for (const User *U : V->users()) { 3876 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 3877 if (!II) return false; 3878 3879 if (!II->isLifetimeStartOrEnd()) 3880 return false; 3881 } 3882 return true; 3883 } 3884 3885 bool llvm::isSafeToSpeculativelyExecute(const Value *V, 3886 const Instruction *CtxI, 3887 const DominatorTree *DT) { 3888 const Operator *Inst = dyn_cast<Operator>(V); 3889 if (!Inst) 3890 return false; 3891 3892 for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) 3893 if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i))) 3894 if (C->canTrap()) 3895 return false; 3896 3897 switch (Inst->getOpcode()) { 3898 default: 3899 return true; 3900 case Instruction::UDiv: 3901 case Instruction::URem: { 3902 // x / y is undefined if y == 0. 3903 const APInt *V; 3904 if (match(Inst->getOperand(1), m_APInt(V))) 3905 return *V != 0; 3906 return false; 3907 } 3908 case Instruction::SDiv: 3909 case Instruction::SRem: { 3910 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 3911 const APInt *Numerator, *Denominator; 3912 if (!match(Inst->getOperand(1), m_APInt(Denominator))) 3913 return false; 3914 // We cannot hoist this division if the denominator is 0. 3915 if (*Denominator == 0) 3916 return false; 3917 // It's safe to hoist if the denominator is not 0 or -1. 3918 if (*Denominator != -1) 3919 return true; 3920 // At this point we know that the denominator is -1. It is safe to hoist as 3921 // long we know that the numerator is not INT_MIN. 3922 if (match(Inst->getOperand(0), m_APInt(Numerator))) 3923 return !Numerator->isMinSignedValue(); 3924 // The numerator *might* be MinSignedValue. 3925 return false; 3926 } 3927 case Instruction::Load: { 3928 const LoadInst *LI = cast<LoadInst>(Inst); 3929 if (!LI->isUnordered() || 3930 // Speculative load may create a race that did not exist in the source. 3931 LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) || 3932 // Speculative load may load data from dirty regions. 3933 LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) || 3934 LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress)) 3935 return false; 3936 const DataLayout &DL = LI->getModule()->getDataLayout(); 3937 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(), 3938 LI->getAlignment(), DL, CtxI, DT); 3939 } 3940 case Instruction::Call: { 3941 auto *CI = cast<const CallInst>(Inst); 3942 const Function *Callee = CI->getCalledFunction(); 3943 3944 // The called function could have undefined behavior or side-effects, even 3945 // if marked readnone nounwind. 
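    // Only calls explicitly marked speculatable may be hoisted; the attribute
    // checked below is the sole authority here.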
3946 return Callee && Callee->isSpeculatable(); 3947 } 3948 case Instruction::VAArg: 3949 case Instruction::Alloca: 3950 case Instruction::Invoke: 3951 case Instruction::CallBr: 3952 case Instruction::PHI: 3953 case Instruction::Store: 3954 case Instruction::Ret: 3955 case Instruction::Br: 3956 case Instruction::IndirectBr: 3957 case Instruction::Switch: 3958 case Instruction::Unreachable: 3959 case Instruction::Fence: 3960 case Instruction::AtomicRMW: 3961 case Instruction::AtomicCmpXchg: 3962 case Instruction::LandingPad: 3963 case Instruction::Resume: 3964 case Instruction::CatchSwitch: 3965 case Instruction::CatchPad: 3966 case Instruction::CatchRet: 3967 case Instruction::CleanupPad: 3968 case Instruction::CleanupRet: 3969 return false; // Misc instructions which have effects 3970 } 3971 } 3972 3973 bool llvm::mayBeMemoryDependent(const Instruction &I) { 3974 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I); 3975 } 3976 3977 OverflowResult llvm::computeOverflowForUnsignedMul( 3978 const Value *LHS, const Value *RHS, const DataLayout &DL, 3979 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 3980 bool UseInstrInfo) { 3981 // Multiplying n * m significant bits yields a result of n + m significant 3982 // bits. If the total number of significant bits does not exceed the 3983 // result bit width (minus 1), there is no overflow. 3984 // This means if we have enough leading zero bits in the operands 3985 // we can guarantee that the result does not overflow. 3986 // Ref: "Hacker's Delight" by Henry Warren 3987 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 3988 KnownBits LHSKnown(BitWidth); 3989 KnownBits RHSKnown(BitWidth); 3990 computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr, 3991 UseInstrInfo); 3992 computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr, 3993 UseInstrInfo); 3994 // Note that underestimating the number of zero bits gives a more 3995 // conservative answer. 3996 unsigned ZeroBits = LHSKnown.countMinLeadingZeros() + 3997 RHSKnown.countMinLeadingZeros(); 3998 // First handle the easy case: if we have enough zero bits there's 3999 // definitely no overflow. 4000 if (ZeroBits >= BitWidth) 4001 return OverflowResult::NeverOverflows; 4002 4003 // Get the largest possible values for each operand. 4004 APInt LHSMax = ~LHSKnown.Zero; 4005 APInt RHSMax = ~RHSKnown.Zero; 4006 4007 // We know the multiply operation doesn't overflow if the maximum values for 4008 // each operand will not overflow after we multiply them together. 4009 bool MaxOverflow; 4010 (void)LHSMax.umul_ov(RHSMax, MaxOverflow); 4011 if (!MaxOverflow) 4012 return OverflowResult::NeverOverflows; 4013 4014 // We know it always overflows if multiplying the smallest possible values for 4015 // the operands also results in overflow. 4016 bool MinOverflow; 4017 (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow); 4018 if (MinOverflow) 4019 return OverflowResult::AlwaysOverflows; 4020 4021 return OverflowResult::MayOverflow; 4022 } 4023 4024 OverflowResult 4025 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS, 4026 const DataLayout &DL, AssumptionCache *AC, 4027 const Instruction *CxtI, 4028 const DominatorTree *DT, bool UseInstrInfo) { 4029 // Multiplying n * m significant bits yields a result of n + m significant 4030 // bits. If the total number of significant bits does not exceed the 4031 // result bit width (minus 1), there is no overflow. 
4032 // This means if we have enough leading sign bits in the operands 4033 // we can guarantee that the result does not overflow. 4034 // Ref: "Hacker's Delight" by Henry Warren 4035 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 4036 4037 // Note that underestimating the number of sign bits gives a more 4038 // conservative answer. 4039 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) + 4040 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT); 4041 4042 // First handle the easy case: if we have enough sign bits there's 4043 // definitely no overflow. 4044 if (SignBits > BitWidth + 1) 4045 return OverflowResult::NeverOverflows; 4046 4047 // There are two ambiguous cases where there can be no overflow: 4048 // SignBits == BitWidth + 1 and 4049 // SignBits == BitWidth 4050 // The second case is difficult to check, therefore we only handle the 4051 // first case. 4052 if (SignBits == BitWidth + 1) { 4053 // It overflows only when both arguments are negative and the true 4054 // product is exactly the minimum negative number. 4055 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 4056 // For simplicity we just check if at least one side is not negative. 4057 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, 4058 nullptr, UseInstrInfo); 4059 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, 4060 nullptr, UseInstrInfo); 4061 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) 4062 return OverflowResult::NeverOverflows; 4063 } 4064 return OverflowResult::MayOverflow; 4065 } 4066 4067 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult. 4068 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) { 4069 switch (OR) { 4070 case ConstantRange::OverflowResult::MayOverflow: 4071 return OverflowResult::MayOverflow; 4072 case ConstantRange::OverflowResult::AlwaysOverflows: 4073 return OverflowResult::AlwaysOverflows; 4074 case ConstantRange::OverflowResult::NeverOverflows: 4075 return OverflowResult::NeverOverflows; 4076 } 4077 llvm_unreachable("Unknown OverflowResult"); 4078 } 4079 4080 /// Combine constant ranges from computeConstantRange() and computeKnownBits(). 
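/// (Illustrative: if the known bits say the value has the form 0b00001???,
/// i.e. [8, 16), and range analysis gives [9, 100), the intersection [9, 16)
/// is tighter than either input alone.)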
4081 static ConstantRange computeConstantRangeIncludingKnownBits(
4082     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4083     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4084     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4085   KnownBits Known = computeKnownBits(
4086       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4087   ConstantRange CR = computeConstantRange(V, UseInstrInfo);
4088   return ConstantRange::fromKnownBits(Known, ForSigned).intersectWith(CR);
4089 }
4090 
4091 OverflowResult llvm::computeOverflowForUnsignedAdd(
4092     const Value *LHS, const Value *RHS, const DataLayout &DL,
4093     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4094     bool UseInstrInfo) {
4095   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4096       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4097       nullptr, UseInstrInfo);
4098   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4099       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4100       nullptr, UseInstrInfo);
4101   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4102 }
4103 
4104 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4105                                                   const Value *RHS,
4106                                                   const AddOperator *Add,
4107                                                   const DataLayout &DL,
4108                                                   AssumptionCache *AC,
4109                                                   const Instruction *CxtI,
4110                                                   const DominatorTree *DT) {
4111   if (Add && Add->hasNoSignedWrap()) {
4112     return OverflowResult::NeverOverflows;
4113   }
4114 
4115   // If LHS and RHS each have at least two sign bits, the addition will look
4116   // like
4117   //
4118   //   XX..... +
4119   //   YY.....
4120   //
4121   // If the carry into the most significant position is 0, X and Y can't both
4122   // be 1 and therefore the carry out of the addition is also 0.
4123   //
4124   // If the carry into the most significant position is 1, X and Y can't both
4125   // be 0 and therefore the carry out of the addition is also 1.
4126   //
4127   // Since the carry into the most significant position is always equal to
4128   // the carry out of the addition, there is no signed overflow.
4129   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4130       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4131     return OverflowResult::NeverOverflows;
4132 
4133   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
4134   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
4135   ConstantRange LHSRange =
4136       ConstantRange::fromKnownBits(LHSKnown, /*signed*/ true);
4137   ConstantRange RHSRange =
4138       ConstantRange::fromKnownBits(RHSKnown, /*signed*/ true);
4139   OverflowResult OR =
4140       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4141   if (OR != OverflowResult::MayOverflow)
4142     return OR;
4143 
4144   // The remaining code needs Add to be available. Return early if it is not.
4145   if (!Add)
4146     return OverflowResult::MayOverflow;
4147 
4148   // If the sign of Add is the same as at least one of the operands, this add
4149   // CANNOT overflow. If this can be determined from the known bits of the
4150   // operands the above signedAddMayOverflow() check will have already done so.
4151   // The only other way to improve on the known bits is from an assumption, so
4152   // call computeKnownBitsFromAssume() directly.
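  // E.g. (illustrative): given "assume(%add >= 0)" and an LHS already known
  // non-negative, the sum's sign matches that operand's, so the addition
  // cannot have wrapped.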
4153 bool LHSOrRHSKnownNonNegative = 4154 (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()); 4155 bool LHSOrRHSKnownNegative = 4156 (LHSKnown.isNegative() || RHSKnown.isNegative()); 4157 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { 4158 KnownBits AddKnown(LHSKnown.getBitWidth()); 4159 computeKnownBitsFromAssume( 4160 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true)); 4161 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) || 4162 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) 4163 return OverflowResult::NeverOverflows; 4164 } 4165 4166 return OverflowResult::MayOverflow; 4167 } 4168 4169 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, 4170 const Value *RHS, 4171 const DataLayout &DL, 4172 AssumptionCache *AC, 4173 const Instruction *CxtI, 4174 const DominatorTree *DT) { 4175 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( 4176 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); 4177 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( 4178 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); 4179 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange)); 4180 } 4181 4182 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, 4183 const Value *RHS, 4184 const DataLayout &DL, 4185 AssumptionCache *AC, 4186 const Instruction *CxtI, 4187 const DominatorTree *DT) { 4188 // If LHS and RHS each have at least two sign bits, the subtraction 4189 // cannot overflow. 4190 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && 4191 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) 4192 return OverflowResult::NeverOverflows; 4193 4194 KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT); 4195 KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT); 4196 ConstantRange LHSRange = 4197 ConstantRange::fromKnownBits(LHSKnown, /*signed*/ true); 4198 ConstantRange RHSRange = 4199 ConstantRange::fromKnownBits(RHSKnown, /*signed*/ true); 4200 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange)); 4201 } 4202 4203 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II, 4204 const DominatorTree &DT) { 4205 #ifndef NDEBUG 4206 auto IID = II->getIntrinsicID(); 4207 assert((IID == Intrinsic::sadd_with_overflow || 4208 IID == Intrinsic::uadd_with_overflow || 4209 IID == Intrinsic::ssub_with_overflow || 4210 IID == Intrinsic::usub_with_overflow || 4211 IID == Intrinsic::smul_with_overflow || 4212 IID == Intrinsic::umul_with_overflow) && 4213 "Not an overflow intrinsic!"); 4214 #endif 4215 4216 SmallVector<const BranchInst *, 2> GuardingBranches; 4217 SmallVector<const ExtractValueInst *, 2> Results; 4218 4219 for (const User *U : II->users()) { 4220 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) { 4221 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type"); 4222 4223 if (EVI->getIndices()[0] == 0) 4224 Results.push_back(EVI); 4225 else { 4226 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type"); 4227 4228 for (const auto *U : EVI->users()) 4229 if (const auto *B = dyn_cast<BranchInst>(U)) { 4230 assert(B->isConditional() && "How else is it using an i1?"); 4231 GuardingBranches.push_back(B); 4232 } 4233 } 4234 } else { 4235 // We are using the aggregate directly in a way we don't want to analyze 4236 // here (storing it to a global, say). 
4237       return false;
4238     }
4239   }
4240 
4241   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4242     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4243     if (!NoWrapEdge.isSingleEdge())
4244       return false;
4245 
4246     // Check if all users of the add are provably no-wrap.
4247     for (const auto *Result : Results) {
4248       // If the extractvalue itself is not executed on overflow, then we don't
4249       // need to check each use separately, since domination is transitive.
4250       if (DT.dominates(NoWrapEdge, Result->getParent()))
4251         continue;
4252 
4253       for (auto &RU : Result->uses())
4254         if (!DT.dominates(NoWrapEdge, RU))
4255           return false;
4256     }
4257 
4258     return true;
4259   };
4260 
4261   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4262 }
4263 
4264 
4265 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4266                                                  const DataLayout &DL,
4267                                                  AssumptionCache *AC,
4268                                                  const Instruction *CxtI,
4269                                                  const DominatorTree *DT) {
4270   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4271                                        Add, DL, AC, CxtI, DT);
4272 }
4273 
4274 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4275                                                  const Value *RHS,
4276                                                  const DataLayout &DL,
4277                                                  AssumptionCache *AC,
4278                                                  const Instruction *CxtI,
4279                                                  const DominatorTree *DT) {
4280   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4281 }
4282 
4283 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4284   // A memory operation returns normally if it isn't volatile. A volatile
4285   // operation is allowed to trap.
4286   //
4287   // An atomic operation isn't guaranteed to return in a reasonable amount of
4288   // time because it's possible for another thread to interfere with it for an
4289   // arbitrary length of time, but programs aren't allowed to rely on that.
4290   if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4291     return !LI->isVolatile();
4292   if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4293     return !SI->isVolatile();
4294   if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4295     return !CXI->isVolatile();
4296   if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4297     return !RMWI->isVolatile();
4298   if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4299     return !MII->isVolatile();
4300 
4301   // If there is no successor, then execution can't transfer to it.
4302   if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4303     return !CRI->unwindsToCaller();
4304   if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4305     return !CatchSwitch->unwindsToCaller();
4306   if (isa<ResumeInst>(I))
4307     return false;
4308   if (isa<ReturnInst>(I))
4309     return false;
4310   if (isa<UnreachableInst>(I))
4311     return false;
4312 
4313   // Calls can throw, or contain an infinite loop, or kill the process.
4314   if (auto CS = ImmutableCallSite(I)) {
4315     // Call sites that throw have implicit non-local control flow.
4316     if (!CS.doesNotThrow())
4317       return false;
4318 
4319     // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4320     // etc. and thus not return. However, LLVM already assumes that
4321     //
4322     //  - Thread exiting actions are modeled as writes to memory invisible to
4323     //    the program.
4324     //
4325     //  - Loops that don't have side effects (side effects are volatile/atomic
4326     //    stores and IO) always terminate (see http://llvm.org/PR965).
4327     //    Furthermore IO itself is also modeled as writes to memory invisible
4328     //    to the program.
4329 // 4330 // We rely on those assumptions here, and use the memory effects of the call 4331 // target as a proxy for checking that it always returns. 4332 4333 // FIXME: This isn't aggressive enough; a call which only writes to a global 4334 // is guaranteed to return. 4335 return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() || 4336 match(I, m_Intrinsic<Intrinsic::assume>()) || 4337 match(I, m_Intrinsic<Intrinsic::sideeffect>()) || 4338 match(I, m_Intrinsic<Intrinsic::experimental_widenable_condition>()); 4339 } 4340 4341 // Other instructions return normally. 4342 return true; 4343 } 4344 4345 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) { 4346 // TODO: This is slightly conservative for invoke instruction since exiting 4347 // via an exception *is* normal control for them. 4348 for (auto I = BB->begin(), E = BB->end(); I != E; ++I) 4349 if (!isGuaranteedToTransferExecutionToSuccessor(&*I)) 4350 return false; 4351 return true; 4352 } 4353 4354 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I, 4355 const Loop *L) { 4356 // The loop header is guaranteed to be executed for every iteration. 4357 // 4358 // FIXME: Relax this constraint to cover all basic blocks that are 4359 // guaranteed to be executed at every iteration. 4360 if (I->getParent() != L->getHeader()) return false; 4361 4362 for (const Instruction &LI : *L->getHeader()) { 4363 if (&LI == I) return true; 4364 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false; 4365 } 4366 llvm_unreachable("Instruction not contained in its own parent basic block."); 4367 } 4368 4369 bool llvm::propagatesFullPoison(const Instruction *I) { 4370 switch (I->getOpcode()) { 4371 case Instruction::Add: 4372 case Instruction::Sub: 4373 case Instruction::Xor: 4374 case Instruction::Trunc: 4375 case Instruction::BitCast: 4376 case Instruction::AddrSpaceCast: 4377 case Instruction::Mul: 4378 case Instruction::Shl: 4379 case Instruction::GetElementPtr: 4380 // These operations all propagate poison unconditionally. Note that poison 4381 // is not any particular value, so xor or subtraction of poison with 4382 // itself still yields poison, not zero. 4383 return true; 4384 4385 case Instruction::AShr: 4386 case Instruction::SExt: 4387 // For these operations, one bit of the input is replicated across 4388 // multiple output bits. A replicated poison bit is still poison. 4389 return true; 4390 4391 case Instruction::ICmp: 4392 // Comparing poison with any value yields poison. This is why, for 4393 // instance, x s< (x +nsw 1) can be folded to true. 
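    // (The fold is sound because if x + 1 wraps, the nsw add yields poison,
    // the icmp propagates it, and poison may then be chosen to be true.)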
4394 return true; 4395 4396 default: 4397 return false; 4398 } 4399 } 4400 4401 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) { 4402 switch (I->getOpcode()) { 4403 case Instruction::Store: 4404 return cast<StoreInst>(I)->getPointerOperand(); 4405 4406 case Instruction::Load: 4407 return cast<LoadInst>(I)->getPointerOperand(); 4408 4409 case Instruction::AtomicCmpXchg: 4410 return cast<AtomicCmpXchgInst>(I)->getPointerOperand(); 4411 4412 case Instruction::AtomicRMW: 4413 return cast<AtomicRMWInst>(I)->getPointerOperand(); 4414 4415 case Instruction::UDiv: 4416 case Instruction::SDiv: 4417 case Instruction::URem: 4418 case Instruction::SRem: 4419 return I->getOperand(1); 4420 4421 default: 4422 return nullptr; 4423 } 4424 } 4425 4426 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) { 4427 // We currently only look for uses of poison values within the same basic 4428 // block, as that makes it easier to guarantee that the uses will be 4429 // executed given that PoisonI is executed. 4430 // 4431 // FIXME: Expand this to consider uses beyond the same basic block. To do 4432 // this, look out for the distinction between post-dominance and strong 4433 // post-dominance. 4434 const BasicBlock *BB = PoisonI->getParent(); 4435 4436 // Set of instructions that we have proved will yield poison if PoisonI 4437 // does. 4438 SmallSet<const Value *, 16> YieldsPoison; 4439 SmallSet<const BasicBlock *, 4> Visited; 4440 YieldsPoison.insert(PoisonI); 4441 Visited.insert(PoisonI->getParent()); 4442 4443 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end(); 4444 4445 unsigned Iter = 0; 4446 while (Iter++ < MaxDepth) { 4447 for (auto &I : make_range(Begin, End)) { 4448 if (&I != PoisonI) { 4449 const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I); 4450 if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) 4451 return true; 4452 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 4453 return false; 4454 } 4455 4456 // Mark poison that propagates from I through uses of I. 
4457       if (YieldsPoison.count(&I)) {
4458         for (const User *User : I.users()) {
4459           const Instruction *UserI = cast<Instruction>(User);
4460           if (propagatesFullPoison(UserI))
4461             YieldsPoison.insert(User);
4462         }
4463       }
4464     }
4465 
4466     if (auto *NextBB = BB->getSingleSuccessor()) {
4467       if (Visited.insert(NextBB).second) {
4468         BB = NextBB;
4469         Begin = BB->getFirstNonPHI()->getIterator();
4470         End = BB->end();
4471         continue;
4472       }
4473     }
4474 
4475     break;
4476   }
4477   return false;
4478 }
4479 
4480 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4481   if (FMF.noNaNs())
4482     return true;
4483 
4484   if (auto *C = dyn_cast<ConstantFP>(V))
4485     return !C->isNaN();
4486 
4487   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4488     if (!C->getElementType()->isFloatingPointTy())
4489       return false;
4490     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4491       if (C->getElementAsAPFloat(I).isNaN())
4492         return false;
4493     }
4494     return true;
4495   }
4496 
4497   return false;
4498 }
4499 
4500 static bool isKnownNonZero(const Value *V) {
4501   if (auto *C = dyn_cast<ConstantFP>(V))
4502     return !C->isZero();
4503 
4504   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4505     if (!C->getElementType()->isFloatingPointTy())
4506       return false;
4507     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4508       if (C->getElementAsAPFloat(I).isZero())
4509         return false;
4510     }
4511     return true;
4512   }
4513 
4514   return false;
4515 }
4516 
4517 /// Match clamp pattern for float types without caring about NaNs or signed
4518 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
4519 /// function recognizes whether it can be substituted by a "canonical" min/max
4520 /// pattern.
4521 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4522                                                Value *CmpLHS, Value *CmpRHS,
4523                                                Value *TrueVal, Value *FalseVal,
4524                                                Value *&LHS, Value *&RHS) {
4525   // Try to match
4526   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4527   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4528   // and return description of the outer Max/Min.
4529 
4530   // First, check if select has inverse order:
4531   if (CmpRHS == FalseVal) {
4532     std::swap(TrueVal, FalseVal);
4533     Pred = CmpInst::getInversePredicate(Pred);
4534   }
4535 
4536   // Assume success now. If there's no match, callers should not use these anyway.
4537   LHS = TrueVal;
4538   RHS = FalseVal;
4539 
4540   const APFloat *FC1;
4541   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4542     return {SPF_UNKNOWN, SPNB_NA, false};
4543 
4544   const APFloat *FC2;
4545   switch (Pred) {
4546   case CmpInst::FCMP_OLT:
4547   case CmpInst::FCMP_OLE:
4548   case CmpInst::FCMP_ULT:
4549   case CmpInst::FCMP_ULE:
4550     if (match(FalseVal,
4551               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4552                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4553         FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4554       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4555     break;
4556   case CmpInst::FCMP_OGT:
4557   case CmpInst::FCMP_OGE:
4558   case CmpInst::FCMP_UGT:
4559   case CmpInst::FCMP_UGE:
4560     if (match(FalseVal,
4561               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4562                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4563         FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4564       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4565     break;
4566   default:
4567     break;
4568   }
4569 
4570   return {SPF_UNKNOWN, SPNB_NA, false};
4571 }
4572 
4573 /// Recognize variations of:
4574 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ?
(h) : (v))) 4575 static SelectPatternResult matchClamp(CmpInst::Predicate Pred, 4576 Value *CmpLHS, Value *CmpRHS, 4577 Value *TrueVal, Value *FalseVal) { 4578 // Swap the select operands and predicate to match the patterns below. 4579 if (CmpRHS != TrueVal) { 4580 Pred = ICmpInst::getSwappedPredicate(Pred); 4581 std::swap(TrueVal, FalseVal); 4582 } 4583 const APInt *C1; 4584 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 4585 const APInt *C2; 4586 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 4587 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 4588 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 4589 return {SPF_SMAX, SPNB_NA, false}; 4590 4591 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 4592 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 4593 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 4594 return {SPF_SMIN, SPNB_NA, false}; 4595 4596 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 4597 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 4598 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 4599 return {SPF_UMAX, SPNB_NA, false}; 4600 4601 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 4602 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 4603 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 4604 return {SPF_UMIN, SPNB_NA, false}; 4605 } 4606 return {SPF_UNKNOWN, SPNB_NA, false}; 4607 } 4608 4609 /// Recognize variations of: 4610 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) 4611 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, 4612 Value *CmpLHS, Value *CmpRHS, 4613 Value *TVal, Value *FVal, 4614 unsigned Depth) { 4615 // TODO: Allow FP min/max with nnan/nsz. 4616 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); 4617 4618 Value *A, *B; 4619 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); 4620 if (!SelectPatternResult::isMinOrMax(L.Flavor)) 4621 return {SPF_UNKNOWN, SPNB_NA, false}; 4622 4623 Value *C, *D; 4624 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); 4625 if (L.Flavor != R.Flavor) 4626 return {SPF_UNKNOWN, SPNB_NA, false}; 4627 4628 // We have something like: x Pred y ? min(a, b) : min(c, d). 4629 // Try to match the compare to the min/max operations of the select operands. 4630 // First, make sure we have the right compare predicate. 
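  // (Illustrative: an SMIN flavor pairs with slt/sle; an sgt/sge compare is
  // first converted to its swapped form, so both orientations are accepted.)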
4631 switch (L.Flavor) { 4632 case SPF_SMIN: 4633 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { 4634 Pred = ICmpInst::getSwappedPredicate(Pred); 4635 std::swap(CmpLHS, CmpRHS); 4636 } 4637 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 4638 break; 4639 return {SPF_UNKNOWN, SPNB_NA, false}; 4640 case SPF_SMAX: 4641 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { 4642 Pred = ICmpInst::getSwappedPredicate(Pred); 4643 std::swap(CmpLHS, CmpRHS); 4644 } 4645 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 4646 break; 4647 return {SPF_UNKNOWN, SPNB_NA, false}; 4648 case SPF_UMIN: 4649 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { 4650 Pred = ICmpInst::getSwappedPredicate(Pred); 4651 std::swap(CmpLHS, CmpRHS); 4652 } 4653 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 4654 break; 4655 return {SPF_UNKNOWN, SPNB_NA, false}; 4656 case SPF_UMAX: 4657 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { 4658 Pred = ICmpInst::getSwappedPredicate(Pred); 4659 std::swap(CmpLHS, CmpRHS); 4660 } 4661 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) 4662 break; 4663 return {SPF_UNKNOWN, SPNB_NA, false}; 4664 default: 4665 return {SPF_UNKNOWN, SPNB_NA, false}; 4666 } 4667 4668 // If there is a common operand in the already matched min/max and the other 4669 // min/max operands match the compare operands (either directly or inverted), 4670 // then this is min/max of the same flavor. 4671 4672 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4673 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 4674 if (D == B) { 4675 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4676 match(A, m_Not(m_Specific(CmpRHS))))) 4677 return {L.Flavor, SPNB_NA, false}; 4678 } 4679 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4680 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 4681 if (C == B) { 4682 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 4683 match(A, m_Not(m_Specific(CmpRHS))))) 4684 return {L.Flavor, SPNB_NA, false}; 4685 } 4686 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4687 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 4688 if (D == A) { 4689 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 4690 match(B, m_Not(m_Specific(CmpRHS))))) 4691 return {L.Flavor, SPNB_NA, false}; 4692 } 4693 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 4694 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 4695 if (C == A) { 4696 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 4697 match(B, m_Not(m_Specific(CmpRHS))))) 4698 return {L.Flavor, SPNB_NA, false}; 4699 } 4700 4701 return {SPF_UNKNOWN, SPNB_NA, false}; 4702 } 4703 4704 /// Match non-obvious integer minimum and maximum sequences. 4705 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, 4706 Value *CmpLHS, Value *CmpRHS, 4707 Value *TrueVal, Value *FalseVal, 4708 Value *&LHS, Value *&RHS, 4709 unsigned Depth) { 4710 // Assume success. If there's no match, callers should not use these anyway. 
4711 LHS = TrueVal; 4712 RHS = FalseVal; 4713 4714 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal); 4715 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 4716 return SPR; 4717 4718 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth); 4719 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 4720 return SPR; 4721 4722 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) 4723 return {SPF_UNKNOWN, SPNB_NA, false}; 4724 4725 // Z = X -nsw Y 4726 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0) 4727 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0) 4728 if (match(TrueVal, m_Zero()) && 4729 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 4730 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 4731 4732 // Z = X -nsw Y 4733 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0) 4734 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0) 4735 if (match(FalseVal, m_Zero()) && 4736 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 4737 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 4738 4739 const APInt *C1; 4740 if (!match(CmpRHS, m_APInt(C1))) 4741 return {SPF_UNKNOWN, SPNB_NA, false}; 4742 4743 // An unsigned min/max can be written with a signed compare. 4744 const APInt *C2; 4745 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || 4746 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { 4747 // Is the sign bit set? 4748 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX 4749 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN 4750 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() && 4751 C2->isMaxSignedValue()) 4752 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4753 4754 // Is the sign bit clear? 4755 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX 4756 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN 4757 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() && 4758 C2->isMinSignedValue()) 4759 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 4760 } 4761 4762 // Look through 'not' ops to find disguised signed min/max. 4763 // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C) 4764 // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C) 4765 if (match(TrueVal, m_Not(m_Specific(CmpLHS))) && 4766 match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2) 4767 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 4768 4769 // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X) 4770 // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X) 4771 if (match(FalseVal, m_Not(m_Specific(CmpLHS))) && 4772 match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2) 4773 return {Pred == CmpInst::ICMP_SGT ? 
SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4774 
4775   return {SPF_UNKNOWN, SPNB_NA, false};
4776 }
4777 
4778 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
4779   assert(X && Y && "Invalid operand");
4780 
4781   // X = sub (0, Y) || X = sub nsw (0, Y)
4782   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
4783       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
4784     return true;
4785 
4786   // Y = sub (0, X) || Y = sub nsw (0, X)
4787   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
4788       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
4789     return true;
4790 
4791   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
4792   Value *A, *B;
4793   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
4794                        match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
4795          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
4796                       match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
4797 }
4798 
4799 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4800                                               FastMathFlags FMF,
4801                                               Value *CmpLHS, Value *CmpRHS,
4802                                               Value *TrueVal, Value *FalseVal,
4803                                               Value *&LHS, Value *&RHS,
4804                                               unsigned Depth) {
4805   if (CmpInst::isFPPredicate(Pred)) {
4806     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
4807     // 0.0 operand, set the compare's 0.0 operands to that same value for the
4808     // purpose of identifying min/max. Disregard vector constants with undefined
4809     // elements because those cannot be back-propagated for analysis.
4810     Value *OutputZeroVal = nullptr;
4811     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
4812         !cast<Constant>(TrueVal)->containsUndefElement())
4813       OutputZeroVal = TrueVal;
4814     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
4815              !cast<Constant>(FalseVal)->containsUndefElement())
4816       OutputZeroVal = FalseVal;
4817 
4818     if (OutputZeroVal) {
4819       if (match(CmpLHS, m_AnyZeroFP()))
4820         CmpLHS = OutputZeroVal;
4821       if (match(CmpRHS, m_AnyZeroFP()))
4822         CmpRHS = OutputZeroVal;
4823     }
4824   }
4825 
4826   LHS = CmpLHS;
4827   RHS = CmpRHS;
4828 
4829   // Operations on signed zero may return inconsistent results between implementations.
4830   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4831   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4832   // Therefore, we behave conservatively and only proceed if at least one of the
4833   // operands is known not to be zero or if we don't care about signed zero.
4834   switch (Pred) {
4835   default: break;
4836   // FIXME: Include OGT/OLT/UGT/ULT.
4837   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4838   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4839     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4840         !isKnownNonZero(CmpRHS))
4841       return {SPF_UNKNOWN, SPNB_NA, false};
4842   }
4843 
4844   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4845   bool Ordered = false;
4846 
4847   // When given one NaN and one non-NaN input:
4848   //  - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4849   //  - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4850   //    ordered comparison fails), which could be NaN or non-NaN.
4851   // So here we discover exactly what NaN behavior is required/accepted.
4852   if (CmpInst::isFPPredicate(Pred)) {
4853     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4854     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4855 
4856     if (LHSSafe && RHSSafe) {
4857       // Both operands are known non-NaN.
4858 NaNBehavior = SPNB_RETURNS_ANY; 4859 } else if (CmpInst::isOrdered(Pred)) { 4860 // An ordered comparison will return false when given a NaN, so it 4861 // returns the RHS. 4862 Ordered = true; 4863 if (LHSSafe) 4864 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 4865 NaNBehavior = SPNB_RETURNS_NAN; 4866 else if (RHSSafe) 4867 NaNBehavior = SPNB_RETURNS_OTHER; 4868 else 4869 // Completely unsafe. 4870 return {SPF_UNKNOWN, SPNB_NA, false}; 4871 } else { 4872 Ordered = false; 4873 // An unordered comparison will return true when given a NaN, so it 4874 // returns the LHS. 4875 if (LHSSafe) 4876 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 4877 NaNBehavior = SPNB_RETURNS_OTHER; 4878 else if (RHSSafe) 4879 NaNBehavior = SPNB_RETURNS_NAN; 4880 else 4881 // Completely unsafe. 4882 return {SPF_UNKNOWN, SPNB_NA, false}; 4883 } 4884 } 4885 4886 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 4887 std::swap(CmpLHS, CmpRHS); 4888 Pred = CmpInst::getSwappedPredicate(Pred); 4889 if (NaNBehavior == SPNB_RETURNS_NAN) 4890 NaNBehavior = SPNB_RETURNS_OTHER; 4891 else if (NaNBehavior == SPNB_RETURNS_OTHER) 4892 NaNBehavior = SPNB_RETURNS_NAN; 4893 Ordered = !Ordered; 4894 } 4895 4896 // ([if]cmp X, Y) ? X : Y 4897 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 4898 switch (Pred) { 4899 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 4900 case ICmpInst::ICMP_UGT: 4901 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; 4902 case ICmpInst::ICMP_SGT: 4903 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; 4904 case ICmpInst::ICMP_ULT: 4905 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; 4906 case ICmpInst::ICMP_SLT: 4907 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; 4908 case FCmpInst::FCMP_UGT: 4909 case FCmpInst::FCMP_UGE: 4910 case FCmpInst::FCMP_OGT: 4911 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; 4912 case FCmpInst::FCMP_ULT: 4913 case FCmpInst::FCMP_ULE: 4914 case FCmpInst::FCMP_OLT: 4915 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; 4916 } 4917 } 4918 4919 if (isKnownNegation(TrueVal, FalseVal)) { 4920 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can 4921 // match against either LHS or sext(LHS). 4922 auto MaybeSExtCmpLHS = 4923 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS))); 4924 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes()); 4925 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One()); 4926 if (match(TrueVal, MaybeSExtCmpLHS)) { 4927 // Set the return values. If the compare uses the negated value (-X >s 0), 4928 // swap the return values because the negated value is always 'RHS'. 4929 LHS = TrueVal; 4930 RHS = FalseVal; 4931 if (match(CmpLHS, m_Neg(m_Specific(FalseVal)))) 4932 std::swap(LHS, RHS); 4933 4934 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X) 4935 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X) 4936 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes)) 4937 return {SPF_ABS, SPNB_NA, false}; 4938 4939 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X) 4940 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne)) 4941 return {SPF_ABS, SPNB_NA, false}; 4942 4943 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X) 4944 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X) 4945 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne)) 4946 return {SPF_NABS, SPNB_NA, false}; 4947 } 4948 else if (match(FalseVal, MaybeSExtCmpLHS)) { 4949 // Set the return values. 
If the compare uses the negated value (-X >s 0),
4950     // swap the return values because the negated value is always 'RHS'.
4951     LHS = FalseVal;
4952     RHS = TrueVal;
4953     if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
4954       std::swap(LHS, RHS);
4955 
4956     // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
4957     // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
4958     if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4959       return {SPF_NABS, SPNB_NA, false};
4960 
4961     // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
4962     // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
4963     if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4964       return {SPF_ABS, SPNB_NA, false};
4965     }
4966   }
4967 
4968   if (CmpInst::isIntPredicate(Pred))
4969     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4970 
4971   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4972   // may return either -0.0 or 0.0, so the fcmp/select pair has stricter
4973   // semantics than minNum. Be conservative in such cases.
4974   if (NaNBehavior != SPNB_RETURNS_ANY ||
4975       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4976        !isKnownNonZero(CmpRHS)))
4977     return {SPF_UNKNOWN, SPNB_NA, false};
4978 
4979   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4980 }
4981 
4982 /// Helps to match a select pattern in case of a type mismatch.
4983 ///
4984 /// The function handles the case where the types of the true and false values
4985 /// of a select instruction differ from the type of the cmp instruction operands
4986 /// because of a cast instruction. The function checks if it is legal to move
4987 /// the cast operation after the "select". If yes, it returns the new second
4988 /// value of the "select" (with the assumption that the cast is moved):
4989 /// 1. As the operand of the cast instruction when both values of the "select"
4990 ///    are the same cast instruction.
4991 /// 2. As the restored constant (by applying the reverse cast operation) when
4992 ///    the first value of the "select" is a cast operation and the second value
4993 ///    is a constant.
4994 /// NOTE: We return only the new second value because the first value can be
4995 /// accessed as the operand of the cast instruction.
4996 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4997                               Instruction::CastOps *CastOp) {
4998   auto *Cast1 = dyn_cast<CastInst>(V1);
4999   if (!Cast1)
5000     return nullptr;
5001 
5002   *CastOp = Cast1->getOpcode();
5003   Type *SrcTy = Cast1->getSrcTy();
5004   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
5005     // If V1 and V2 are both the same cast from the same type, look through V1.
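    // E.g. with V1 = "sext i8 %a to i32" and V2 = "sext i8 %b to i32" (names
    // illustrative), return %b so matching can continue on the narrow values.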
5006     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
5007       return Cast2->getOperand(0);
5008     return nullptr;
5009   }
5010 
5011   auto *C = dyn_cast<Constant>(V2);
5012   if (!C)
5013     return nullptr;
5014 
5015   Constant *CastedTo = nullptr;
5016   switch (*CastOp) {
5017   case Instruction::ZExt:
5018     if (CmpI->isUnsigned())
5019       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
5020     break;
5021   case Instruction::SExt:
5022     if (CmpI->isSigned())
5023       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
5024     break;
5025   case Instruction::Trunc:
5026     Constant *CmpConst;
5027     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
5028         CmpConst->getType() == SrcTy) {
5029       // Here we have the following case:
5030       //
5031       //   %cond = cmp iN %x, CmpConst
5032       //   %tr = trunc iN %x to iK
5033       //   %narrowsel = select i1 %cond, iK %tr, iK C
5034       //
5035       // We can always move trunc after select operation:
5036       //
5037       //   %cond = cmp iN %x, CmpConst
5038       //   %widesel = select i1 %cond, iN %x, iN CmpConst
5039       //   %tr = trunc iN %widesel to iK
5040       //
5041       // Note that C could be extended in any way because we don't care about
5042       // the upper bits after truncation. This cannot be an abs pattern since
5043       // an abs would look like:
5044       //
5045       //   select i1 %cond, x, -x.
5046       //
5047       // So only a min/max pattern can be matched here. Such a match requires
5048       // the widened C to equal CmpConst; hence we set the widened C to
5049       // CmpConst, and the condition trunc(CmpConst) == C is checked below.
5050       CastedTo = CmpConst;
5051     } else {
5052       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5053     }
5054     break;
5055   case Instruction::FPTrunc:
5056     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5057     break;
5058   case Instruction::FPExt:
5059     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5060     break;
5061   case Instruction::FPToUI:
5062     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5063     break;
5064   case Instruction::FPToSI:
5065     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5066     break;
5067   case Instruction::UIToFP:
5068     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5069     break;
5070   case Instruction::SIToFP:
5071     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5072     break;
5073   default:
5074     break;
5075   }
5076 
5077   if (!CastedTo)
5078     return nullptr;
5079 
5080   // Make sure the cast doesn't lose any information.
5081   Constant *CastedBack =
5082       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5083   if (CastedBack != C)
5084     return nullptr;
5085 
5086   return CastedTo;
5087 }
5088 
5089 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
5090                                              Instruction::CastOps *CastOp,
5091                                              unsigned Depth) {
5092   if (Depth >= MaxDepth)
5093     return {SPF_UNKNOWN, SPNB_NA, false};
5094 
5095   SelectInst *SI = dyn_cast<SelectInst>(V);
5096   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5097 
5098   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5099   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5100 
5101   CmpInst::Predicate Pred = CmpI->getPredicate();
5102   Value *CmpLHS = CmpI->getOperand(0);
5103   Value *CmpRHS = CmpI->getOperand(1);
5104   Value *TrueVal = SI->getTrueValue();
5105   Value *FalseVal = SI->getFalseValue();
5106   FastMathFlags FMF;
5107   if (isa<FPMathOperator>(CmpI))
5108     FMF = CmpI->getFastMathFlags();
5109 
5110   // Bail out early.
5111   if (CmpI->isEquality())
5112     return {SPF_UNKNOWN, SPNB_NA, false};
5113 
5114   // Deal with type mismatches.
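  // E.g. (an illustrative case, names hypothetical) the compare may be on i32
  // while the select produces i64 via matching sign-extends:
  //   %cmp = icmp slt i32 %x, %y
  //   %xw  = sext i32 %x to i64
  //   %yw  = sext i32 %y to i64
  //   %sel = select i1 %cmp, i64 %xw, i64 %yw
  // Looking through the casts recognizes this as smin(%x, %y) on i32.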
5115 if (CastOp && CmpLHS->getType() != TrueVal->getType()) { 5116 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) { 5117 // If this is a potential fmin/fmax with a cast to integer, then ignore 5118 // -0.0 because there is no corresponding integer value. 5119 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) 5120 FMF.setNoSignedZeros(); 5121 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 5122 cast<CastInst>(TrueVal)->getOperand(0), C, 5123 LHS, RHS, Depth); 5124 } 5125 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) { 5126 // If this is a potential fmin/fmax with a cast to integer, then ignore 5127 // -0.0 because there is no corresponding integer value. 5128 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) 5129 FMF.setNoSignedZeros(); 5130 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 5131 C, cast<CastInst>(FalseVal)->getOperand(0), 5132 LHS, RHS, Depth); 5133 } 5134 } 5135 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, 5136 LHS, RHS, Depth); 5137 } 5138 5139 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) { 5140 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT; 5141 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT; 5142 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT; 5143 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT; 5144 if (SPF == SPF_FMINNUM) 5145 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; 5146 if (SPF == SPF_FMAXNUM) 5147 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; 5148 llvm_unreachable("unhandled!"); 5149 } 5150 5151 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) { 5152 if (SPF == SPF_SMIN) return SPF_SMAX; 5153 if (SPF == SPF_UMIN) return SPF_UMAX; 5154 if (SPF == SPF_SMAX) return SPF_SMIN; 5155 if (SPF == SPF_UMAX) return SPF_UMIN; 5156 llvm_unreachable("unhandled!"); 5157 } 5158 5159 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) { 5160 return getMinMaxPred(getInverseMinMaxFlavor(SPF)); 5161 } 5162 5163 /// Return true if "icmp Pred LHS RHS" is always true. 
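/// E.g. "%x u<= (add nuw %x, 1)" always holds, because a no-wrap add of a
/// nonnegative constant cannot decrease %x.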
5164 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, 5165 const Value *RHS, const DataLayout &DL, 5166 unsigned Depth) { 5167 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!"); 5168 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS) 5169 return true; 5170 5171 switch (Pred) { 5172 default: 5173 return false; 5174 5175 case CmpInst::ICMP_SLE: { 5176 const APInt *C; 5177 5178 // LHS s<= LHS +_{nsw} C if C >= 0 5179 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C)))) 5180 return !C->isNegative(); 5181 return false; 5182 } 5183 5184 case CmpInst::ICMP_ULE: { 5185 const APInt *C; 5186 5187 // LHS u<= LHS +_{nuw} C for any C 5188 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C)))) 5189 return true; 5190 5191 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB) 5192 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B, 5193 const Value *&X, 5194 const APInt *&CA, const APInt *&CB) { 5195 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) && 5196 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB)))) 5197 return true; 5198 5199 // If X & C == 0 then (X | C) == X +_{nuw} C 5200 if (match(A, m_Or(m_Value(X), m_APInt(CA))) && 5201 match(B, m_Or(m_Specific(X), m_APInt(CB)))) { 5202 KnownBits Known(CA->getBitWidth()); 5203 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr, 5204 /*CxtI*/ nullptr, /*DT*/ nullptr); 5205 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero)) 5206 return true; 5207 } 5208 5209 return false; 5210 }; 5211 5212 const Value *X; 5213 const APInt *CLHS, *CRHS; 5214 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS)) 5215 return CLHS->ule(*CRHS); 5216 5217 return false; 5218 } 5219 } 5220 } 5221 5222 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred 5223 /// ALHS ARHS" is true. Otherwise, return None. 5224 static Optional<bool> 5225 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, 5226 const Value *ARHS, const Value *BLHS, const Value *BRHS, 5227 const DataLayout &DL, unsigned Depth) { 5228 switch (Pred) { 5229 default: 5230 return None; 5231 5232 case CmpInst::ICMP_SLT: 5233 case CmpInst::ICMP_SLE: 5234 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) && 5235 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth)) 5236 return true; 5237 return None; 5238 5239 case CmpInst::ICMP_ULT: 5240 case CmpInst::ICMP_ULE: 5241 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) && 5242 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth)) 5243 return true; 5244 return None; 5245 } 5246 } 5247 5248 /// Return true if the operands of the two compares match. IsSwappedOps is true 5249 /// when the operands match, but are swapped. 5250 static bool isMatchingOps(const Value *ALHS, const Value *ARHS, 5251 const Value *BLHS, const Value *BRHS, 5252 bool &IsSwappedOps) { 5253 5254 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS); 5255 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS); 5256 return IsMatchingOps || IsSwappedOps; 5257 } 5258 5259 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true. 5260 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false. 5261 /// Otherwise, return None if we can't infer anything. 5262 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred, 5263 CmpInst::Predicate BPred, 5264 bool AreSwappedOps) { 5265 // Canonicalize the predicate as if the operands were not commuted. 
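  // E.g. if A is "x <s y" and B is "y >s x" (swapped operands), B's predicate
  // becomes SLT after swapping, and "x <s y" trivially implies "x <s y".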
5266 if (AreSwappedOps) 5267 BPred = ICmpInst::getSwappedPredicate(BPred); 5268 5269 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred)) 5270 return true; 5271 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred)) 5272 return false; 5273 5274 return None; 5275 } 5276 5277 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true. 5278 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false. 5279 /// Otherwise, return None if we can't infer anything. 5280 static Optional<bool> 5281 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, 5282 const ConstantInt *C1, 5283 CmpInst::Predicate BPred, 5284 const ConstantInt *C2) { 5285 ConstantRange DomCR = 5286 ConstantRange::makeExactICmpRegion(APred, C1->getValue()); 5287 ConstantRange CR = 5288 ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue()); 5289 ConstantRange Intersection = DomCR.intersectWith(CR); 5290 ConstantRange Difference = DomCR.difference(CR); 5291 if (Intersection.isEmptySet()) 5292 return false; 5293 if (Difference.isEmptySet()) 5294 return true; 5295 return None; 5296 } 5297 5298 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is 5299 /// false. Otherwise, return None if we can't infer anything. 5300 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS, 5301 const ICmpInst *RHS, 5302 const DataLayout &DL, bool LHSIsTrue, 5303 unsigned Depth) { 5304 Value *ALHS = LHS->getOperand(0); 5305 Value *ARHS = LHS->getOperand(1); 5306 // The rest of the logic assumes the LHS condition is true. If that's not the 5307 // case, invert the predicate to make it so. 5308 ICmpInst::Predicate APred = 5309 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate(); 5310 5311 Value *BLHS = RHS->getOperand(0); 5312 Value *BRHS = RHS->getOperand(1); 5313 ICmpInst::Predicate BPred = RHS->getPredicate(); 5314 5315 // Can we infer anything when the two compares have matching operands? 5316 bool AreSwappedOps; 5317 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) { 5318 if (Optional<bool> Implication = isImpliedCondMatchingOperands( 5319 APred, BPred, AreSwappedOps)) 5320 return Implication; 5321 // No amount of additional analysis will infer the second condition, so 5322 // early exit. 5323 return None; 5324 } 5325 5326 // Can we infer anything when the LHS operands match and the RHS operands are 5327 // constants (not necessarily matching)? 5328 if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) { 5329 if (Optional<bool> Implication = isImpliedCondMatchingImmOperands( 5330 APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS))) 5331 return Implication; 5332 // No amount of additional analysis will infer the second condition, so 5333 // early exit. 5334 return None; 5335 } 5336 5337 if (APred == BPred) 5338 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth); 5339 return None; 5340 } 5341 5342 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is 5343 /// false. Otherwise, return None if we can't infer anything. We expect the 5344 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction. 5345 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS, 5346 const ICmpInst *RHS, 5347 const DataLayout &DL, bool LHSIsTrue, 5348 unsigned Depth) { 5349 // The LHS must be an 'or' or an 'and' instruction. 
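  // E.g. if "%c = and i1 %a, %b" is known true, then %a and %b are both known
  // true, so either one alone may establish the RHS condition.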
5350   assert((LHS->getOpcode() == Instruction::And ||
5351           LHS->getOpcode() == Instruction::Or) &&
5352          "Expected LHS to be 'and' or 'or'.");
5353 
5354   assert(Depth <= MaxDepth && "Hit recursion limit");
5355 
5356   // If the result of an 'or' is false, then we know both legs of the 'or' are
5357   // false. Similarly, if the result of an 'and' is true, then we know both
5358   // legs of the 'and' are true.
5359   Value *ALHS, *ARHS;
5360   if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
5361       (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
5362     // FIXME: Make this non-recursive.
5363     if (Optional<bool> Implication =
5364             isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
5365       return Implication;
5366     if (Optional<bool> Implication =
5367             isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
5368       return Implication;
5369     return None;
5370   }
5371   return None;
5372 }
5373 
5374 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
5375                                         const DataLayout &DL, bool LHSIsTrue,
5376                                         unsigned Depth) {
5377   // Bail out when we hit the limit.
5378   if (Depth == MaxDepth)
5379     return None;
5380 
5381   // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
5382   // example.
5383   if (LHS->getType() != RHS->getType())
5384     return None;
5385 
5386   Type *OpTy = LHS->getType();
5387   assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
5388 
5389   // LHS ==> RHS by definition
5390   if (LHS == RHS)
5391     return LHSIsTrue;
5392 
5393   // FIXME: Extend the code below to handle vectors.
5394   if (OpTy->isVectorTy())
5395     return None;
5396 
5397   assert(OpTy->isIntegerTy(1) && "implied by above");
5398 
5399   // Both LHS and RHS are icmps.
5400   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
5401   const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
5402   if (LHSCmp && RHSCmp)
5403     return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
5404 
5405   // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
5406   // an icmp. FIXME: Add support for and/or on the RHS.
5407   const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
5408   if (LHSBO && RHSCmp) {
5409     if ((LHSBO->getOpcode() == Instruction::And ||
5410          LHSBO->getOpcode() == Instruction::Or))
5411       return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
5412   }
5413   return None;
5414 }
5415 
5416 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
5417                                              const Instruction *ContextI,
5418                                              const DataLayout &DL) {
5419   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
5420   if (!ContextI || !ContextI->getParent())
5421     return None;
5422 
5423   // TODO: This is a poor/cheap way to determine dominance. Should we use a
5424   // dominator tree (e.g., from a SimplifyQuery) instead?
5425   const BasicBlock *ContextBB = ContextI->getParent();
5426   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
5427   if (!PredBB)
5428     return None;
5429 
5430   // We need a conditional branch in the predecessor.
5431   Value *PredCond;
5432   BasicBlock *TrueBB, *FalseBB;
5433   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
5434     return None;
5435 
5436   // A branch with identical successors should get simplified; don't bother
analyzing its condition.
5437   if (TrueBB == FalseBB)
5438     return None;
5439 
5440   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
5441          "Predecessor block does not point to successor?");
5442 
5443   // Is this condition implied by the predecessor condition?
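  // E.g. for "br i1 %c, label %ctx, label %other" in the predecessor, %c is
  // known true on entry to %ctx and known false on entry to %other.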
5444   bool CondIsTrue = TrueBB == ContextBB;
5445   return isImpliedCondition(PredCond, Cond, DL, CondIsTrue);
5446 }
5447 
5448 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
5449                               APInt &Upper, const InstrInfoQuery &IIQ) {
5450   unsigned Width = Lower.getBitWidth();
5451   const APInt *C;
5452   switch (BO.getOpcode()) {
5453   case Instruction::Add:
5454     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5455       // FIXME: If we have both nuw and nsw, we should reduce the range further.
5456       if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5457         // 'add nuw x, C' produces [C, UINT_MAX].
5458         Lower = *C;
5459       } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
5460         if (C->isNegative()) {
5461           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
5462           Lower = APInt::getSignedMinValue(Width);
5463           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5464         } else {
5465           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
5466           Lower = APInt::getSignedMinValue(Width) + *C;
5467           Upper = APInt::getSignedMaxValue(Width) + 1;
5468         }
5469       }
5470     }
5471     break;
5472 
5473   case Instruction::And:
5474     if (match(BO.getOperand(1), m_APInt(C)))
5475       // 'and x, C' produces [0, C].
5476       Upper = *C + 1;
5477     break;
5478 
5479   case Instruction::Or:
5480     if (match(BO.getOperand(1), m_APInt(C)))
5481       // 'or x, C' produces [C, UINT_MAX].
5482       Lower = *C;
5483     break;
5484 
5485   case Instruction::AShr:
5486     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5487       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
5488       Lower = APInt::getSignedMinValue(Width).ashr(*C);
5489       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
5490     } else if (match(BO.getOperand(0), m_APInt(C))) {
5491       unsigned ShiftAmount = Width - 1;
5492       if (!C->isNullValue() && IIQ.isExact(&BO))
5493         ShiftAmount = C->countTrailingZeros();
5494       if (C->isNegative()) {
5495         // 'ashr C, x' produces [C, C >> (Width-1)]
5496         Lower = *C;
5497         Upper = C->ashr(ShiftAmount) + 1;
5498       } else {
5499         // 'ashr C, x' produces [C >> (Width-1), C]
5500         Lower = C->ashr(ShiftAmount);
5501         Upper = *C + 1;
5502       }
5503     }
5504     break;
5505 
5506   case Instruction::LShr:
5507     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
5508       // 'lshr x, C' produces [0, UINT_MAX >> C].
5509       Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
5510     } else if (match(BO.getOperand(0), m_APInt(C))) {
5511       // 'lshr C, x' produces [C >> (Width-1), C].
5512       unsigned ShiftAmount = Width - 1;
5513       if (!C->isNullValue() && IIQ.isExact(&BO))
5514         ShiftAmount = C->countTrailingZeros();
5515       Lower = C->lshr(ShiftAmount);
5516       Upper = *C + 1;
5517     }
5518     break;
5519 
5520   case Instruction::Shl:
5521     if (match(BO.getOperand(0), m_APInt(C))) {
5522       if (IIQ.hasNoUnsignedWrap(&BO)) {
5523         // 'shl nuw C, x' produces [C, C << CLZ(C)]
5524         Lower = *C;
5525         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
5526       } else if (IIQ.hasNoSignedWrap(&BO)) { // TODO: What if both nuw+nsw?
5527         if (C->isNegative()) {
5528           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
5529           unsigned ShiftAmount = C->countLeadingOnes() - 1;
5530           Lower = C->shl(ShiftAmount);
5531           Upper = *C + 1;
5532         } else {
5533           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
5534           unsigned ShiftAmount = C->countLeadingZeros() - 1;
5535           Lower = *C;
5536           Upper = C->shl(ShiftAmount) + 1;
5537         }
5538       }
5539     }
5540     break;
5541 
5542   case Instruction::SDiv:
5543     if (match(BO.getOperand(1), m_APInt(C))) {
5544       APInt IntMin = APInt::getSignedMinValue(Width);
5545       APInt IntMax = APInt::getSignedMaxValue(Width);
5546       if (C->isAllOnesValue()) {
5547         // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
5548         //    (x == INT_MIN would overflow, so INT_MIN is excluded).
5549         Lower = IntMin + 1;
5550         Upper = IntMax + 1;
5551       } else if (C->countLeadingZeros() < Width - 1) {
5552         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
5553         //    where C != -1 and C != 0 and C != 1
5554         Lower = IntMin.sdiv(*C);
5555         Upper = IntMax.sdiv(*C);
5556         if (Lower.sgt(Upper))
5557           std::swap(Lower, Upper);
5558         Upper = Upper + 1;
5559         assert(Upper != Lower && "Upper part of range has wrapped!");
5560       }
5561     } else if (match(BO.getOperand(0), m_APInt(C))) {
5562       if (C->isMinSignedValue()) {
5563         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
5564         Lower = *C;
5565         Upper = Lower.lshr(1) + 1;
5566       } else {
5567         // 'sdiv C, x' produces [-|C|, |C|].
5568         Upper = C->abs() + 1;
5569         Lower = (-Upper) + 1;
5570       }
5571     }
5572     break;
5573 
5574   case Instruction::UDiv:
5575     if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
5576       // 'udiv x, C' produces [0, UINT_MAX / C].
5577       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
5578     } else if (match(BO.getOperand(0), m_APInt(C))) {
5579       // 'udiv C, x' produces [0, C].
5580       Upper = *C + 1;
5581     }
5582     break;
5583 
5584   case Instruction::SRem:
5585     if (match(BO.getOperand(1), m_APInt(C))) {
5586       // 'srem x, C' produces (-|C|, |C|).
5587       Upper = C->abs();
5588       Lower = (-Upper) + 1;
5589     }
5590     break;
5591 
5592   case Instruction::URem:
5593     if (match(BO.getOperand(1), m_APInt(C)))
5594       // 'urem x, C' produces [0, C).
5595       Upper = *C;
5596     break;
5597 
5598   default:
5599     break;
5600   }
5601 }
5602 
5603 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
5604                                   APInt &Upper) {
5605   unsigned Width = Lower.getBitWidth();
5606   const APInt *C;
5607   switch (II.getIntrinsicID()) {
5608   case Intrinsic::uadd_sat:
5609     // uadd.sat(x, C) produces [C, UINT_MAX].
5610     if (match(II.getOperand(0), m_APInt(C)) ||
5611         match(II.getOperand(1), m_APInt(C)))
5612       Lower = *C;
5613     break;
5614   case Intrinsic::sadd_sat:
5615     if (match(II.getOperand(0), m_APInt(C)) ||
5616         match(II.getOperand(1), m_APInt(C))) {
5617       if (C->isNegative()) {
5618         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
5619         Lower = APInt::getSignedMinValue(Width);
5620         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
5621       } else {
5622         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
5623         Lower = APInt::getSignedMinValue(Width) + *C;
5624         Upper = APInt::getSignedMaxValue(Width) + 1;
5625       }
5626     }
5627     break;
5628   case Intrinsic::usub_sat:
5629     // usub.sat(C, x) produces [0, C].
5630     if (match(II.getOperand(0), m_APInt(C)))
5631       Upper = *C + 1;
5632     // usub.sat(x, C) produces [0, UINT_MAX - C].
5633     else if (match(II.getOperand(1), m_APInt(C)))
5634       Upper = APInt::getMaxValue(Width) - *C + 1;
5635     break;
5636   case Intrinsic::ssub_sat:
5637     if (match(II.getOperand(0), m_APInt(C))) {
5638       if (C->isNegative()) {
5639         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
5640         Lower = APInt::getSignedMinValue(Width);
5641         Upper = *C - APInt::getSignedMinValue(Width) + 1;
5642       } else {
5643         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
5644         Lower = *C - APInt::getSignedMaxValue(Width);
5645         Upper = APInt::getSignedMaxValue(Width) + 1;
5646       }
5647     } else if (match(II.getOperand(1), m_APInt(C))) {
5648       if (C->isNegative()) {
5649         // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX].
5650         Lower = APInt::getSignedMinValue(Width) - *C;
5651         Upper = APInt::getSignedMaxValue(Width) + 1;
5652       } else {
5653         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
5654         Lower = APInt::getSignedMinValue(Width);
5655         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
5656       }
5657     }
5658     break;
5659   default:
5660     break;
5661   }
5662 }
5663 
5664 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
5665                                       APInt &Upper) {
5666   const Value *LHS, *RHS;
5667   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
5668   if (R.Flavor == SPF_UNKNOWN)
5669     return;
5670 
5671   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
5672 
5673   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
5674     // If the negation part of the abs (in RHS) has the NSW flag,
5675     // then the result of abs(X) is [0..SIGNED_MAX],
5676     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
5677     Lower = APInt::getNullValue(BitWidth);
5678     if (cast<Instruction>(RHS)->hasNoSignedWrap())
5679       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
5680     else
5681       Upper = APInt::getSignedMinValue(BitWidth) + 1;
5682     return;
5683   }
5684 
5685   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
5686     // The result of -abs(X) is <= 0.
5687     Lower = APInt::getSignedMinValue(BitWidth);
5688     Upper = APInt(BitWidth, 1);
5689     return;
5690   }
5691 
5692   // TODO: Handle min/max flavors.
5693 }
5694 
5695 ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo) {
5696   assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
5697 
5698   const APInt *C;
5699   if (match(V, m_APInt(C)))
5700     return ConstantRange(*C);
5701 
5702   InstrInfoQuery IIQ(UseInstrInfo);
5703   unsigned BitWidth = V->getType()->getScalarSizeInBits();
5704   APInt Lower = APInt(BitWidth, 0);
5705   APInt Upper = APInt(BitWidth, 0);
5706   if (auto *BO = dyn_cast<BinaryOperator>(V))
5707     setLimitsForBinOp(*BO, Lower, Upper, IIQ);
5708   else if (auto *II = dyn_cast<IntrinsicInst>(V))
5709     setLimitsForIntrinsic(*II, Lower, Upper);
5710   else if (auto *SI = dyn_cast<SelectInst>(V))
5711     setLimitsForSelectPattern(*SI, Lower, Upper);
5712 
5713   ConstantRange CR = Lower != Upper ? ConstantRange(Lower, Upper)
5714                                     : ConstantRange::getFull(BitWidth);
5715 
5716   if (auto *I = dyn_cast<Instruction>(V))
5717     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
5718       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
5719 
5720   return CR;
5721 }
5722 
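// Example (illustrative only, values hypothetical): for "%r = urem i8 %x, 10"
// that also carries !range metadata of [2, 8), setLimitsForBinOp yields the
// range [0, 10), which is then intersected with the metadata range so that
// computeConstantRange returns [2, 8).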