//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE) {
  return ::computeKnownBits(V, Depth,
                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
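  // Illustrative example: an i8 value with known bits 0??????1 (sign bit
  // clear, low bit set) must lie in [1, 127], so both queries below succeed.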
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  // If low bits are zero in either operand, output low known-0 bits.
  // Also compute a conservative estimate for high known-0 bits.
  // More trickiness is possible, but this is sufficient for the
  // interesting case of alignment computation.
  unsigned TrailZ = Known.countMinTrailingZeros() +
                    Known2.countMinTrailingZeros();
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                            Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;

  TrailZ = std::min(TrailZ, BitWidth);
  LeadZ = std::min(LeadZ, BitWidth);
  Known.resetAll();
  Known.Zero.setLowBits(TrailZ);
  Known.Zero.setHighBits(LeadZ);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
static bool isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
         IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).
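    // Illustrative example of the kind of pattern matched below:
    //   %cmp = icmp eq i32 %v, 42
    //   call void @llvm.assume(i1 %cmp)
    // A query on %v at a context dominated by the assume learns all 32 bits
    // of %v, because the constant RHS is fully known.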

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    ConstantInt *C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
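      // (Where bit i of b is known zero, bit i of v | b equals bit i of v,
      // so ~(v | b) == a forces bit i of v to be the inverse of bit i of a.)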
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C->getZExtValue());
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C->getZExtValue());
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C->getZExtValue());
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C->getZExtValue());
      Known.One |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.Zero << C->getZExtValue();
      Known.One |= RHSKnown.One << C->getZExtValue();
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the left by C.
      Known.Zero |= RHSKnown.One << C->getZExtValue();
      Known.One |= RHSKnown.Zero << C->getZExtValue();
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functors that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
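  // For example, with BitWidth == 32 the well-defined shift amounts occupy
  // only the low 5 bits, so the mask below is PowerOf2Ceil(32) - 1 == 31.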
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                       m_Value(Y))) ||
         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                       m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
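    // For example, if either i32 operand is known to be less than 2^16
    // (16 leading zero bits), the remainder is also less than 2^16.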
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
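          // For example, for %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
          // with %i.next = add i32 %i, 4, both inputs have at least two
          // trailing zeros, so %i is known to be a multiple of 4.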
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits.
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value references ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
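    // For example, a call annotated with !range !{i64 0, i64 256} is known
    // to have its upper 56 bits zero.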
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::cttz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleTZ = Known2.One.countTrailingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleTZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
      case Intrinsic::ctpop: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // We can bound the space the count needs. Also, bits known to be zero
        // can't contribute to the population.
        unsigned BitsPossiblySet = Known2.countMaxPopulation();
        unsigned LowBits = Log2_32(BitsPossiblySet)+1;
        Known.Zero.setBitsFrom(LowBits);
        // TODO: we could bound KnownOne using the lower bound on the number
        // of bits which might be set provided by popcnt KnownOne2.
        break;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        break;
      }
    }
    break;
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    break;
  case Instruction::ExtractValue:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
      if (EVI->getNumIndices() != 1) break;
      if (EVI->getIndices()[0] == 0) {
        switch (II->getIntrinsicID()) {
        default: break;
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::sadd_with_overflow:
          computeKnownBitsAddSub(true, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::usub_with_overflow:
        case Intrinsic::ssub_with_overflow:
          computeKnownBitsAddSub(false, II->getArgOperand(0),
                                 II->getArgOperand(1), false, Known, Known2,
                                 Depth, Q);
          break;
        case Intrinsic::umul_with_overflow:
        case Intrinsic::smul_with_overflow:
          computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
                              Known, Known2, Depth, Q);
          break;
        }
      }
    }
  }
}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
  KnownBits Known(getBitWidth(V->getType(), Q.DL));
  computeKnownBits(V, Known, Depth, Q);
  return Known;
}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const Query &Q) {
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Known.getBitWidth();

  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
          V->getType()->isPtrOrPtrVectorTy()) &&
         "Not integer or pointer type!");
  assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
         "V and Known should have same BitWidth");
  (void)BitWidth;

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    Known.One = *C;
    Known.Zero = ~Known.One;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
    Known.setAllZero();
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
    // We know that CDS must be a vector of integers. Take the intersection of
    // each element.
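    // For example, <2 x i8> <i8 5, i8 7> yields Known.One == 0b00000101 and
    // Known.Zero == 0b11111000; bit 1 differs between the elements and stays
    // unknown.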
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      APInt Elt = CDS->getElementAsAPInt(i);
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
    // We know that CV must be a vector of integers. Take the intersection of
    // each element.
    Known.Zero.setAllBits(); Known.One.setAllBits();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      Constant *Element = CV->getAggregateElement(i);
      auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
      if (!ElementCI) {
        Known.resetAll();
        return;
      }
      const APInt &Elt = ElementCI->getValue();
      Known.Zero &= ~Elt;
      Known.One &= Elt;
    }
    return;
  }

  // Start out not knowing anything.
  Known.resetAll();

  // We can't imply anything about undefs.
  if (isa<UndefValue>(V))
    return;

  // There's no point in looking through other users of ConstantData for
  // assumptions. Confirm that we've handled them all.
  assert(!isa<ConstantData>(V) && "Unhandled constant data!");

  // Limit search depth.
  // All recursive calls that increase depth must come after this.
  if (Depth == MaxDepth)
    return;

  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
  // the bits of its aliasee.
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
    if (!GA->isInterposable())
      computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
    return;
  }

  if (const Operator *I = dyn_cast<Operator>(V))
    computeKnownBitsFromOperator(I, Known, Depth, Q);

  // Aligned pointers have trailing zeros - refine the Known.Zero set.
  if (V->getType()->isPointerTy()) {
    unsigned Align = V->getPointerAlignment(Q.DL);
    if (Align)
      Known.Zero.setLowBits(countTrailingZeros(Align));
  }

  // computeKnownBitsFromAssume strictly refines Known.
  // Therefore, we run it after computeKnownBitsFromOperator.

  // Check whether a nearby assume intrinsic can determine some known bits.
  computeKnownBitsFromAssume(V, Known, Depth, Q);

  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
}

/// Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                            const Query &Q) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  if (const Constant *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return OrZero;

    const APInt *ConstIntOrConstSplatInt;
    if (match(C, m_APInt(ConstIntOrConstSplatInt)))
      return ConstIntOrConstSplatInt->isPowerOf2();
  }

  // 1 << X is clearly a power of two if the one is not shifted off the end. If
  // it is shifted off the end then the result is undefined.
  if (match(V, m_Shl(m_One(), m_Value())))
    return true;

  // (signmask) >>l X is clearly a power of two if the one is not shifted off
  // the bottom. If it is shifted off the bottom then the result is undefined.
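  // For example, on i8 the sign mask is 0x80, and 'lshr i8 -128, 3' yields
  // 0x10 -- still a single set bit for any in-range shift amount.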
  if (match(V, m_LShr(m_SignMask(), m_Value())))
    return true;

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return false;

  Value *X = nullptr, *Y = nullptr;
  // A shift left or a logical shift right of a power of two is a power of two
  // or zero.
  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
                 match(V, m_LShr(m_Value(X), m_Value()))))
    return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);

  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
    return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);

  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
    return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
           isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);

  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
    // A power of two and'd with anything is a power of two or zero.
    if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
      return true;
    // X & (-X) is always a power of two or zero.
    if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
      return true;
    return false;
  }

  // Adding a power-of-two or zero to the same power-of-two or zero yields
  // either the original power-of-two, a larger power-of-two or zero.
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
    if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
      if (match(X, m_And(m_Specific(Y), m_Value())) ||
          match(X, m_And(m_Value(), m_Specific(Y))))
        if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
          return true;
      if (match(Y, m_And(m_Specific(X), m_Value())) ||
          match(Y, m_And(m_Value(), m_Specific(X))))
        if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
          return true;

      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      KnownBits LHSBits(BitWidth);
      computeKnownBits(X, LHSBits, Depth, Q);

      KnownBits RHSBits(BitWidth);
      computeKnownBits(Y, RHSBits, Depth, Q);
      // If i8 V is a power of two or zero:
      //   ZeroBits: 1 1 1 0 1 1 1 1
      //  ~ZeroBits: 0 0 0 1 0 0 0 0
      if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
        // If OrZero isn't set, we cannot give back a zero result.
        // Make sure either the LHS or RHS has a bit set.
        if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
          return true;
    }
  }

  // An exact divide or right shift can only shift off zero bits, so the result
  // is a power of two only if the first operand is a power of two and not
  // copying a sign bit (sdiv int_min, 2).
  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
      match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
    return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
                                  Depth, Q);
  }

  return false;
}

/// \brief Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
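///
/// For example (a sketch; %p and the struct type are hypothetical), the
/// following is known non-null in address space 0 even if %p itself is not,
/// because the inbounds GEP adds a non-zero struct field offset:
///   %f = getelementptr inbounds { i32, i32 }, { i32, i32 }* %p, i64 0, i32 1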
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
                              const Query &Q) {
  if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
    return false;

  // FIXME: Support vector-GEPs.
  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");

  // If the base pointer is non-null, we cannot walk to a null address with an
  // inbounds GEP in address space zero.
  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
    return true;

  // Walk the GEP operands and see if any operand introduces a non-zero offset.
  // If so, then the GEP cannot produce a null pointer, as doing so would
  // inherently violate the inbounds contract within address space zero.
  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    // Struct types are easy -- they must always be indexed by a constant.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = Q.DL.getStructLayout(STy);
      uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
      if (ElementOffset > 0)
        return true;
      continue;
    }

    // If we have a zero-sized type, the index doesn't matter. Keep looping.
    if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
      continue;

    // Fast path the constant operand case both for efficiency and so we don't
    // increment Depth when just zipping down an all-constant GEP.
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
      if (!OpC->isZero())
        return true;
      continue;
    }

    // We post-increment Depth here because while isKnownNonZero increments it
    // as well, when we pop back up that increment won't persist. We don't want
    // to recurse 10k times just because we have 10k GEP operands. We don't
    // bail completely out because we want to handle constant GEPs regardless
    // of depth.
    if (Depth++ >= MaxDepth)
      continue;

    if (isKnownNonZero(GTI.getOperand(), Depth, Q))
      return true;
  }

  return false;
}

static bool isKnownNonNullFromDominatingCondition(const Value *V,
                                                  const Instruction *CtxI,
                                                  const DominatorTree *DT) {
  assert(V->getType()->isPointerTy() && "V must be pointer type");
  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");

  if (!CtxI || !DT)
    return false;

  unsigned NumUsesExplored = 0;
  for (auto *U : V->users()) {
    // Avoid massive lists.
    if (NumUsesExplored >= DomConditionsMaxUses)
      break;
    NumUsesExplored++;

    // If the value is used as an argument to a call or invoke, then argument
    // attributes may provide an answer about null-ness.
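    // For example (hypothetical @f and %p): given
    //   declare void @f(i8* nonnull)
    // a dominating 'call void @f(i8* %p)' lets us conclude that %p is
    // non-null at the context instruction.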
    if (auto CS = ImmutableCallSite(U))
      if (auto *CalledFunc = CS.getCalledFunction())
        for (const Argument &Arg : CalledFunc->args())
          if (CS.getArgOperand(Arg.getArgNo()) == V &&
              Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
            return true;

    // Consider only compare instructions uniquely controlling a branch.
    CmpInst::Predicate Pred;
    if (!match(const_cast<User *>(U),
               m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
        (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
      continue;

    for (auto *CmpU : U->users()) {
      if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
        assert(BI->isConditional() && "uses a comparison!");

        BasicBlock *NonNullSuccessor =
            BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
        BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
        if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
          return true;
      } else if (Pred == ICmpInst::ICMP_NE &&
                 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
                 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
        return true;
      }
    }
  }

  return false;
}

/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value?
static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
  const unsigned NumRanges = Ranges->getNumOperands() / 2;
  assert(NumRanges >= 1);
  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());
    if (Range.contains(Value))
      return false;
  }
  return true;
}

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
  if (auto *C = dyn_cast<Constant>(V)) {
    if (C->isNullValue())
      return false;
    if (isa<ConstantInt>(C))
      // Must be non-zero due to null test above.
      return true;

    // For constant vectors, check that all elements are undefined or known
    // non-zero to determine that the whole vector is known non-zero.
    if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
      for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
        Constant *Elt = C->getAggregateElement(i);
        if (!Elt || Elt->isNullValue())
          return false;
        if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
          return false;
      }
      return true;
    }

    // A global variable in address space 0 is non-null unless it is extern
    // weak or an absolute symbol reference. Other address spaces may have
    // null as a valid address for a global, so we can't assume anything.
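    // For example, '@g = global i32 0' is known non-null in address space 0,
    // while '@w = extern_weak global i32' may resolve to null (hypothetical
    // globals for illustration).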
    if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
      if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
          GV->getType()->getAddressSpace() == 0)
        return true;
    } else
      return false;
  }

  if (auto *I = dyn_cast<Instruction>(V)) {
    if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
      // If the possible ranges don't contain zero, then the value is
      // definitely non-zero.
      if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
        const APInt ZeroValue(Ty->getBitWidth(), 0);
        if (rangeMetadataExcludesValue(Ranges, ZeroValue))
          return true;
      }
    }
  }

  // Check for pointer simplifications.
  if (V->getType()->isPointerTy()) {
    // Alloca never returns null, malloc might.
    if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
      return true;

    // A byval, inalloca, or nonnull argument is never null.
    if (const Argument *A = dyn_cast<Argument>(V))
      if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
        return true;

    // A Load tagged with nonnull metadata is never null.
    if (const LoadInst *LI = dyn_cast<LoadInst>(V))
      if (LI->getMetadata(LLVMContext::MD_nonnull))
        return true;

    if (auto CS = ImmutableCallSite(V))
      if (CS.isReturnNonNull())
        return true;
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ >= MaxDepth)
    return false;

  // Check for recursive pointer simplifications.
  if (V->getType()->isPointerTy()) {
    if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
      return true;

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (isGEPKnownNonNull(GEP, Depth, Q))
        return true;
  }

  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);

  // X | Y != 0 if X != 0 or Y != 0.
  Value *X = nullptr, *Y = nullptr;
  if (match(V, m_Or(m_Value(X), m_Value(Y))))
    return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);

  // ext X != 0 if X != 0.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
    return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);

  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
  // if the lowest bit is shifted off the end.
  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
    // shl nuw can't remove any non-zero bits.
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    if (BO->hasNoUnsignedWrap())
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known(BitWidth);
    computeKnownBits(X, Known, Depth, Q);
    if (Known.One[0])
      return true;
  }
  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
  // defined if the sign bit is shifted off the end.
  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
    // shr exact can only shift out zero bits.
    const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
    if (BO->isExact())
      return isKnownNonZero(X, Depth, Q);

    KnownBits Known = computeKnownBits(X, Depth, Q);
    if (Known.isNegative())
      return true;

    // If the shifter operand is a constant, and all of the bits shifted
    // out are known to be zero, and X is known non-zero then at least one
    // non-zero bit must remain.
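    // For example (a worked sketch on i8): if X has Known.One bit 6 set and
    // Y == 3, at most one leading zero is possible, so a known set bit
    // survives the shift (1 < 8 - 3) and 'lshr X, 3' is non-zero.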
    if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
      auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
      // Is there a known one in the portion not shifted out?
      if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
        return true;
      // Are all the bits to be shifted out known zero?
      if (Known.countMinTrailingZeros() >= ShiftVal)
        return isKnownNonZero(X, Depth, Q);
    }
  }
  // div exact can only produce a zero if the dividend is zero.
  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
    return isKnownNonZero(X, Depth, Q);
  }
  // X + Y.
  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    KnownBits XKnown = computeKnownBits(X, Depth, Q);
    KnownBits YKnown = computeKnownBits(Y, Depth, Q);

    // If X and Y are both non-negative (as signed values) then their sum is
    // not zero unless both X and Y are zero.
    if (XKnown.isNonNegative() && YKnown.isNonNegative())
      if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
        return true;

    // If X and Y are both negative (as signed values) then their sum is not
    // zero unless both X and Y equal INT_MIN.
    if (XKnown.isNegative() && YKnown.isNegative()) {
      APInt Mask = APInt::getSignedMaxValue(BitWidth);
      // The sign bit of X is set. If some other bit is set then X is not equal
      // to INT_MIN.
      if (XKnown.One.intersects(Mask))
        return true;
      // The sign bit of Y is set. If some other bit is set then Y is not equal
      // to INT_MIN.
      if (YKnown.One.intersects(Mask))
        return true;
    }

    // The sum of a non-negative number and a power of two is not zero.
    if (XKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
      return true;
    if (YKnown.isNonNegative() &&
        isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
      return true;
  }
  // X * Y.
  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
    const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
    // If X and Y are non-zero then so is X * Y as long as the multiplication
    // does not overflow.
    if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
        isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
      return true;
  }
  // (C ? X : Y) != 0 if X != 0 and Y != 0.
  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
        isKnownNonZero(SI->getFalseValue(), Depth, Q))
      return true;
  }
  // PHI
  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    // Try to detect a recurrence that monotonically increases from a
    // starting value, as these are common as induction variables.
    if (PN->getNumIncomingValues() == 2) {
      Value *Start = PN->getIncomingValue(0);
      Value *Induction = PN->getIncomingValue(1);
      if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
        std::swap(Start, Induction);
      if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
        if (!C->isZero() && !C->isNegative()) {
          ConstantInt *X;
          if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
               match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
              !X->isNegative())
            return true;
        }
      }
    }
    // Check if all incoming values are non-zero constants.
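    // For example, '%p = phi i32 [ 1, %a ], [ 2, %b ]' is known non-zero
    // because every incoming value is a non-zero constant (hypothetical phi
    // for illustration).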
    bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
      return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
    });
    if (AllNonZeroConstants)
      return true;
  }

  KnownBits Known(BitWidth);
  computeKnownBits(V, Known, Depth, Q);
  return Known.One != 0;
}

/// Return true if V2 == V1 + X, where X is known non-zero.
static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
  if (!BO || BO->getOpcode() != Instruction::Add)
    return false;
  Value *Op = nullptr;
  if (V2 == BO->getOperand(0))
    Op = BO->getOperand(1);
  else if (V2 == BO->getOperand(1))
    Op = BO->getOperand(0);
  else
    return false;
  return isKnownNonZero(Op, 0, Q);
}

/// Return true if it is known that V1 != V2.
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
  if (V1 == V2)
    return false;
  if (V1->getType() != V2->getType())
    // We can't look through casts yet.
    return false;
  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
    return true;

  if (V1->getType()->isIntOrIntVectorTy()) {
    // Are any known bits in V1 contradictory to known bits in V2? If V1
    // has a known zero where V2 has a known one, they must not be equal.
    KnownBits Known1 = computeKnownBits(V1, 0, Q);
    KnownBits Known2 = computeKnownBits(V2, 0, Q);

    if (Known1.Zero.intersects(Known2.One) ||
        Known2.Zero.intersects(Known1.One))
      return true;
  }
  return false;
}

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and a bit is set only if it is true for all of the elements in the vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                       const Query &Q) {
  KnownBits Known(Mask.getBitWidth());
  computeKnownBits(V, Known, Depth, Q);
  return Mask.isSubsetOf(Known.Zero);
}

/// For vector constants, loop over the elements and find the constant with the
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
static unsigned computeNumSignBitsVectorConstant(const Value *V,
                                                 unsigned TyBits) {
  const auto *CV = dyn_cast<Constant>(V);
  if (!CV || !CV->getType()->isVectorTy())
    return 0;

  unsigned MinSignBits = TyBits;
  unsigned NumElts = CV->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    // If we find a non-ConstantInt, bail out.
    auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
    if (!Elt)
      return 0;

    MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
  }

  return MinSignBits;
}

static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
  assert(Result > 0 && "At least one sign bit needs to be present!");
  return Result;
}

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign bit
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits.
static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
                                       const Query &Q) {
  assert(Depth <= MaxDepth && "Limit Search Depth");

  // We return the minimum number of sign bits that are guaranteed to be
  // present in V, so for undef we have to conservatively return 1. We don't
  // have the same behavior for poison though -- that's a FIXME today.

  unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general computeKnownBits case
  // below.

  if (Depth == MaxDepth)
    return 1;  // Limit search depth.

  const Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;

  case Instruction::SDiv: {
    const APInt *Denominator;
    // sdiv X, C -> adds log(C) sign bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {
      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits.
      unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Add floor(log(C)) bits to the numerator bits.
      return std::min(TyBits, NumBits + Denominator->logBase2());
    }
    break;
  }

  case Instruction::SRem: {
    const APInt *Denominator;
    // srem X, C -> we know that the result is within [-C+1,C) when C is a
    // positive constant. This lets us put a lower bound on the number of sign
    // bits.
    if (match(U->getOperand(1), m_APInt(Denominator))) {
      // Ignore non-positive denominator.
      if (!Denominator->isStrictlyPositive())
        break;

      // Calculate the incoming numerator bits. SRem by a positive constant
      // can't lower the number of sign bits.
      unsigned NumrBits =
          ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);

      // Calculate the leading sign bit constraints by examining the
      // denominator. Given that the denominator is positive, there are two
      // cases:
      //
      //  1. the numerator is positive. The result range is [0,C) and
      //     [0,C) u< (1 << ceilLogBase2(C)).
      //
      //  2. the numerator is negative.
      //     Then the result range is (-C,0], and integers in (-C,0] are
      //     either 0 or >u (-1 << ceilLogBase2(C)).
      //
      // Thus a lower bound on the number of sign bits is `TyBits -
      // ceilLogBase2(C)`.

      unsigned ResBits = TyBits - Denominator->ceilLogBase2();
      return std::max(NumrBits, ResBits);
    }
    break;
  }

  case Instruction::AShr: {
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    // ashr X, C -> adds C sign bits. Vectors too.
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      unsigned ShAmtLimited = ShAmt->getZExtValue();
      if (ShAmtLimited >= TyBits)
        break;  // Bad shift.
      Tmp += ShAmtLimited;
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  }
  case Instruction::Shl: {
    const APInt *ShAmt;
    if (match(U->getOperand(1), m_APInt(ShAmt))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
      Tmp2 = ShAmt->getZExtValue();
      if (Tmp2 >= TyBits ||   // Bad shift.
          Tmp2 >= Tmp) break; // Shifted all sign bits out.
      return Tmp - Tmp2;
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:  // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // computeKnownBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select:
    Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
    return std::min(Tmp, Tmp2);

  case Instruction::Add:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (Known.isNonNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2) - 1;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        KnownBits Known(TyBits);
        computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((Known.Zero | 1).isAllOnesValue())
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (Known.isNonNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2) - 1;

  case Instruction::Mul: {
    // The output of the Mul can be at most twice the valid bits in the inputs.
    unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
    if (SignBitsOp0 == 1) return 1;  // Early out.
    unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
    if (SignBitsOp1 == 1) return 1;
    unsigned OutValidBits =
        (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
    return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
  }

  case Instruction::PHI: {
    const PHINode *PN = cast<PHINode>(U);
    unsigned NumIncomingValues = PN->getNumIncomingValues();
    // Don't analyze large in-degree PHIs.
    if (NumIncomingValues > 4) break;
    // Unreachable blocks may have zero-operand PHI nodes.
    if (NumIncomingValues == 0) break;

    // Take the minimum of all incoming values. This can't infinitely loop
    // because of our depth threshold.
    Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
    for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
      if (Tmp == 1) return Tmp;
      Tmp = std::min(
          Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
    }
    return Tmp;
  }

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;

  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and skip
    // tracking the specific element. But at least we might find information
    // valid for all elements of the vector (for example if vector is sign
    // extended, shifted, etc).
    return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.

  // If we can examine all elements of a vector constant successfully, we're
  // done (we can't do any better than that). If not, keep trying.
  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
    return VecSignBits;

  KnownBits Known(TyBits);
  computeKnownBits(V, Known, Depth, Q);

  // If we know that the sign bit is either zero or one, determine the number
  // of identical bits in the top of the input value.
  return std::max(FirstAnswer, Known.countMinSignBits());
}

/// This function computes the integer multiple of Base that equals V. If
/// successful, it returns true and stores the multiple in Multiple; otherwise
/// it returns false. It looks through SExt instructions only if
/// LookThroughSExt is true.
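///
/// For example (a sketch; %x is a hypothetical value): for
/// V = 'shl i32 %x, 2' and Base == 4, the shift is rewritten as a multiply
/// by 4, so the function succeeds with Multiple == %x.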
bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
                           bool LookThroughSExt, unsigned Depth) {
  const unsigned MaxDepth = 6;

  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  assert(V->getType()->isIntegerTy() && "Not integer type!");

  Type *T = V->getType();

  ConstantInt *CI = dyn_cast<ConstantInt>(V);

  if (Base == 0)
    return false;

  if (Base == 1) {
    Multiple = V;
    return true;
  }

  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
  Constant *BaseVal = ConstantInt::get(T, Base);
  if (CO && CO == BaseVal) {
    // Multiple is 1.
    Multiple = ConstantInt::get(T, 1);
    return true;
  }

  if (CI && CI->getZExtValue() % Base == 0) {
    Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
    return true;
  }

  if (Depth == MaxDepth) return false;  // Limit search depth.

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // otherwise fall through to ZExt
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth + 1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1.
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }

    Value *Mul0 = nullptr;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth + 1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1).
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1.
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = nullptr;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth + 1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0).
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0.
          Multiple = Op0;
          return true;
        }
    }
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}

Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
                                            const TargetLibraryInfo *TLI) {
  const Function *F = ICS.getCalledFunction();
  if (!F)
    return Intrinsic::not_intrinsic;

  if (F->isIntrinsic())
    return F->getIntrinsicID();

  if (!TLI)
    return Intrinsic::not_intrinsic;

  LibFunc Func;
  // We are going to make assumptions about the semantics of these functions,
  // so check that the target knows the function is available in this
  // environment and that it does not have local linkage.
  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
    return Intrinsic::not_intrinsic;

  if (!ICS.onlyReadsMemory())
    return Intrinsic::not_intrinsic;

  // Otherwise check if we have a call to a function that can be turned into a
  // vector intrinsic.
  switch (Func) {
  default:
    break;
  case LibFunc_sin:
  case LibFunc_sinf:
  case LibFunc_sinl:
    return Intrinsic::sin;
  case LibFunc_cos:
  case LibFunc_cosf:
  case LibFunc_cosl:
    return Intrinsic::cos;
  case LibFunc_exp:
  case LibFunc_expf:
  case LibFunc_expl:
    return Intrinsic::exp;
  case LibFunc_exp2:
  case LibFunc_exp2f:
  case LibFunc_exp2l:
    return Intrinsic::exp2;
  case LibFunc_log:
  case LibFunc_logf:
  case LibFunc_logl:
    return Intrinsic::log;
  case LibFunc_log10:
  case LibFunc_log10f:
  case LibFunc_log10l:
    return Intrinsic::log10;
  case LibFunc_log2:
  case LibFunc_log2f:
  case LibFunc_log2l:
    return Intrinsic::log2;
  case LibFunc_fabs:
  case LibFunc_fabsf:
  case LibFunc_fabsl:
    return Intrinsic::fabs;
  case LibFunc_fmin:
  case LibFunc_fminf:
  case LibFunc_fminl:
    return Intrinsic::minnum;
  case LibFunc_fmax:
  case LibFunc_fmaxf:
  case LibFunc_fmaxl:
    return Intrinsic::maxnum;
  case LibFunc_copysign:
  case LibFunc_copysignf:
  case LibFunc_copysignl:
    return Intrinsic::copysign;
  case LibFunc_floor:
  case LibFunc_floorf:
  case LibFunc_floorl:
    return Intrinsic::floor;
  case LibFunc_ceil:
  case LibFunc_ceilf:
  case LibFunc_ceill:
    return Intrinsic::ceil;
  case LibFunc_trunc:
  case LibFunc_truncf:
  case LibFunc_truncl:
    return Intrinsic::trunc;
  case LibFunc_rint:
  case LibFunc_rintf:
  case LibFunc_rintl:
    return Intrinsic::rint;
  case LibFunc_nearbyint:
  case LibFunc_nearbyintf:
  case LibFunc_nearbyintl:
    return Intrinsic::nearbyint;
  case LibFunc_round:
  case LibFunc_roundf:
  case LibFunc_roundl:
    return Intrinsic::round;
  case LibFunc_pow:
  case LibFunc_powf:
  case LibFunc_powl:
    return Intrinsic::pow;
  case LibFunc_sqrt:
  case LibFunc_sqrtf:
  case LibFunc_sqrtl:
    if (ICS->hasNoNaNs())
      return Intrinsic::sqrt;
    return Intrinsic::not_intrinsic;
  }

  return Intrinsic::not_intrinsic;
}

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
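///
/// For example, 'fadd double %x, 0.0' can produce +0.0 but never -0.0, and
/// any sitofp/uitofp result is +0.0 when the integer input is zero
/// (illustrative; %x is a hypothetical value).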
bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
                                unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  if (Depth == MaxDepth)
    return false;  // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  // Check if the nsz fast-math flag is set.
  if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
    if (FPO->hasNoSignedZeros())
      return true;

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (I->getOpcode() == Instruction::FAdd)
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
      if (CFP->isNullValue())
        return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I)) {
    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
    switch (IID) {
    default:
      break;
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    case Intrinsic::sqrt:
      return CannotBeNegativeZero(CI->getArgOperand(0), TLI, Depth + 1);
    // fabs(x) != -0.0
    case Intrinsic::fabs:
      return true;
    }
  }

  return false;
}

/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
/// standard ordered compare. For example, -0.0 olt 0.0 would be treated as
/// true because of the sign bit, even though the two values compare equal.
static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
                                            const TargetLibraryInfo *TLI,
                                            bool SignBitOnly,
                                            unsigned Depth) {
  // TODO: This function does not do the right thing when SignBitOnly is true
  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
  // which flips the sign bits of NaNs. See
  // https://llvm.org/bugs/show_bug.cgi?id=31702.

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    return !CFP->getValueAPF().isNegative() ||
           (!SignBitOnly && CFP->getValueAPF().isZero());
  }

  if (Depth == MaxDepth)
    return false;  // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (!I)
    return false;

  switch (I->getOpcode()) {
  default:
    break;
  // Unsigned integers are always nonnegative.
  case Instruction::UIToFP:
    return true;
  case Instruction::FMul:
    // x*x is always non-negative or a NaN.
    if (I->getOperand(0) == I->getOperand(1) &&
        (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
      return true;

    LLVM_FALLTHROUGH;
  case Instruction::FAdd:
  case Instruction::FDiv:
  case Instruction::FRem:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Select:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::FPExt:
  case Instruction::FPTrunc:
    // Widening/narrowing never change sign.
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Call:
    const auto *CI = cast<CallInst>(I);
    Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
    switch (IID) {
    default:
      break;
    case Intrinsic::maxnum:
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
                                             SignBitOnly, Depth + 1) ||
             cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
                                             SignBitOnly, Depth + 1);
    case Intrinsic::minnum:
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
                                             SignBitOnly, Depth + 1) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
                                             SignBitOnly, Depth + 1);
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::fabs:
      return true;

    case Intrinsic::sqrt:
      // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
      if (!SignBitOnly)
        return true;
      return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
                                 CannotBeNegativeZero(CI->getOperand(0), TLI));

    case Intrinsic::powi:
      if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // powi(x,n) is non-negative if n is even.
        if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
          return true;
      }
      // TODO: This is not correct. Given that exp is an integer, here are the
      // ways that pow can return a negative value:
      //
      //   pow(x, exp)    --> negative if exp is odd and x is negative.
      //   pow(-0, exp)   --> -inf if exp is negative odd.
      //   pow(-0, exp)   --> -0 if exp is positive odd.
      //   pow(-inf, exp) --> -0 if exp is negative odd.
      //   pow(-inf, exp) --> -inf if exp is positive odd.
      //
      // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
      // but we must return false if x == -0. Unfortunately we do not currently
      // have a way of expressing this constraint. See details in
      // https://llvm.org/bugs/show_bug.cgi?id=31702.
      return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
                                             SignBitOnly, Depth + 1);

    case Intrinsic::fma:
    case Intrinsic::fmuladd:
      // x*x+y is non-negative if y is non-negative.
      return I->getOperand(0) == I->getOperand(1) &&
             (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
             cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI,
                                             SignBitOnly, Depth + 1);
    }
    break;
  }
  return false;
}

bool llvm::CannotBeOrderedLessThanZero(const Value *V,
                                       const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
}

bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
}

bool llvm::isKnownNeverNaN(const Value *V) {
  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");

  // If we're told that NaNs won't happen, assume they won't.
  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
    if (FPMathOp->hasNoNaNs())
      return true;

  // TODO: Handle instructions and potentially recurse like other 'isKnown'
  // functions. For example, the result of sitofp is never NaN.

  // Handle scalar constants.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->isNaN();

  // Bail out for constant expressions, but try to handle vector constants.
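  // For example, '<2 x float> <float 1.0, float undef>' is accepted below:
  // the defined element is a non-NaN ConstantFP, and undef elements are
  // skipped (illustrative constant).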
  if (!V->getType()->isVectorTy() || !isa<Constant>(V))
    return false;

  // For vectors, verify that each element is not NaN.
  unsigned NumElts = V->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
    if (!Elt)
      return false;
    if (isa<UndefValue>(Elt))
      continue;
    auto *CElt = dyn_cast<ConstantFP>(Elt);
    if (!CElt || CElt->isNaN())
      return false;
  }
  // All elements were confirmed not-NaN or undefined.
  return true;
}

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null.
Value *llvm::isBytewiseValue(Value *V) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Handle 'null' ConstantArrayZero etc.
  if (Constant *C = dyn_cast<Constant>(V))
    if (C->isNullValue())
      return Constant::getNullValue(Type::getInt8Ty(V->getContext()));

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers whose width is a multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getBitWidth() % 8 == 0) {
      assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");

      if (!CI->getValue().isSplat(8))
        return nullptr;
      return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
    }
  }

  // A ConstantDataArray/Vector is splatable if all its members are equal and
  // also splatable.
  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
    Value *Elt = CA->getElementAsConstant(0);
    Value *Val = isBytewiseValue(Elt);
    if (!Val)
      return nullptr;

    for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
      if (CA->getElementAsConstant(I) != Elt)
        return nullptr;

    return Val;
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return nullptr;
}

// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
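//
// For example (a sketch), with Idxs == {1, 1} and IdxSkip == 1, the value
// found at From[1][1] is inserted into To at index {1}: the first IdxSkip
// indices are dropped when building the result.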
static Value *BuildSubAggregate(Value *From, Value *To, Type *IndexedType,
                                SmallVectorImpl<unsigned> &Idxs,
                                unsigned IdxSkip,
                                Instruction *InsertBefore) {
  StructType *STy = dyn_cast<StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it.
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively.
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Clean up.
        while (PrevTo != OrigTo) {
          InsertValueInst *Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements.
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates.
    if (To)
      return To;
  }
  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
  // the struct's elements had a value that was inserted directly. In the
  // latter case, perhaps we can't determine each of the subelements
  // individually, but we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot.
  Value *V = FindInsertedValue(From, Idxs);

  if (!V)
    return nullptr;

  // Insert the value into the new (sub) aggregate.
  return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
                                 "tmp", InsertBefore);
}

// This helper takes a nested struct and extracts a part of it (which is again
// a struct) into a new value. For example, given the struct:
//   { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
//   { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work
// if each of the elements of the substruct is known (i.e., inserted into From
// by an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore.
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it was inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (!C) return nullptr;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }

  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop over the indices for the insertvalue instruction in parallel with
    // the requested indices.
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues.
        if (!InsertBefore)
          return nullptr;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially. For example,
        //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue {i32, { i32, i32 } } %B, 1
        // This can be changed into
        //   %A = insertvalue {i32, i32 } undef, i32 10, 0
        //   %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required.
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in.
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction.
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices.
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size &&
           "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (such as, extracting from a function return value
  // or load instruction).
  return nullptr;
}

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
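///
/// For example (a sketch, assuming a DataLayout where i32 has 4-byte alloc
/// size): for 'getelementptr inbounds i32, i32* %base, i64 3' this returns
/// %base with Offset == 12.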
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                              const DataLayout &DL) {
  unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
  APInt ByteOffset(BitWidth, 0);

  // We walk up the defs but use a visited set to handle unreachable code. In
  // that case, we stop after accumulating the cycle once (not that it
  // matters).
  SmallPtrSet<Value *, 16> Visited;
  while (Visited.insert(Ptr).second) {
    if (Ptr->getType()->isVectorTy())
      break;

    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // pointer type.
      APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset))
        break;

      ByteOffset += GEPOffset.getSExtValue();

      Ptr = GEP->getPointerOperand();
    } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
               Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
      Ptr = cast<Operator>(Ptr)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
      if (GA->isInterposable())
        break;
      Ptr = GA->getAliasee();
    } else {
      break;
    }
  }
  Offset = ByteOffset.getSExtValue();
  return Ptr;
}

bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                       unsigned CharSize) {
  // Make sure the GEP has exactly three arguments.
  if (GEP->getNumOperands() != 3)
    return false;

  // Make sure the index-ee is a pointer to an array of \p CharSize integers.
  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
    return false;

  // Check to make sure that the first operand of the GEP is an integer and
  // has value 0 so that we are sure we're indexing into the initializer.
  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!FirstIdx || !FirstIdx->isZero())
    return false;

  return true;
}

bool llvm::getConstantDataArrayInfo(const Value *V,
                                    ConstantDataArraySlice &Slice,
                                    unsigned ElementSize, uint64_t Offset) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // The GEP operator should be based on a pointer to a string constant, and
    // is indexing into the string constant.
    if (!isGEPBasedOnPointerToString(GEP, ElementSize))
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
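    // e.g. (illustrative) 'getelementptr [12 x i8], [12 x i8]* @s, i64 0,
    // i64 %n' has a variable second index %n, so we must give up here.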
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
                                    StartIdx + Offset);
  }

  // The GEP, whether an instruction or a constant expression, must reference
  // a global variable that is a constant and is initialized. The referenced
  // constant initializer is the array that we'll use for optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  const ConstantDataArray *Array;
  ArrayType *ArrayTy;
  if (GV->getInitializer()->isNullValue()) {
    Type *GVTy = GV->getValueType();
    if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
      // A zeroinitializer for the array; there is no ConstantDataArray.
      Array = nullptr;
    } else {
      const DataLayout &DL = GV->getParent()->getDataLayout();
      uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
      uint64_t Length = SizeInBytes / (ElementSize / 8);
      if (Length <= Offset)
        return false;

      Slice.Array = nullptr;
      Slice.Offset = 0;
      Slice.Length = Length - Offset;
      return true;
    }
  } else {
    // This must be a ConstantDataArray.
    Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
    if (!Array)
      return false;
    ArrayTy = Array->getType();
  }
  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
    return false;

  uint64_t NumElts = ArrayTy->getArrayNumElements();
  if (Offset > NumElts)
    return false;

  Slice.Array = Array;
  Slice.Offset = Offset;
  Slice.Length = NumElts - Offset;
  return true;
}

/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and stores the string in Str. If
/// unsuccessful, it returns false.
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 uint64_t Offset, bool TrimAtNul) {
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
    return false;

  if (Slice.Array == nullptr) {
    if (TrimAtNul) {
      Str = StringRef();
      return true;
    }
    if (Slice.Length == 1) {
      Str = StringRef("", 1);
      return true;
    }
    // We cannot instantiate a StringRef as we do not have an appropriate
    // string of 0s at hand.
    return false;
  }

  // Start out with the entire array in the StringRef.
  Str = Slice.Array->getAsString();
  // Skip over 'offset' bytes.
  Str = Str.substr(Slice.Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we just return the whole end of string. The client may know
    // some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}

// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
static uint64_t GetStringLengthH(const Value *V,
                                 SmallPtrSetImpl<const PHINode*> &PHIs,
                                 unsigned CharSize) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN).second)
      return ~0ULL;  // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (Value *IncValue : PN->incoming_values()) {
      uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0; // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) -> strlen(x) if strlen(x) == strlen(y), else unknown.
  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, CharSize))
    return 0;

  if (Slice.Array == nullptr)
    return 1;

  // Search for nul characters
  unsigned NullIndex = 0;
  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
    if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
      break;
  }

  return NullIndex + 1;
}

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'. If we can't, return 0.
uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
  if (!V->getType()->isPointerTy()) return 0;

  SmallPtrSet<const PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
  // return 1, the 'len+1' of an empty string.
  return Len == ~0ULL ? 1 : Len;
}

/// \brief \p PN defines a loop-variant pointer to an object. Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
                                         const LoopInfo *LI) {
  // Find the loop-defined value.
  Loop *L = LI->getLoopFor(PN->getParent());
  if (PN->getNumIncomingValues() != 2)
    return true;

  // Find the value from previous iteration.
  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    return true;

  // If a new pointer is loaded in the loop, the pointer references a different
  // object in every iteration. E.g.:
  //   for (i)
  //     int *p = a[i];
  //     ...
  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
    if (!L->isLoopInvariant(Load->getPointerOperand()))
      return false;
  return true;
}

Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
                                 unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        return V;
      V = GA->getAliasee();
    } else if (isa<AllocaInst>(V)) {
      // An alloca can't be further simplified.
      return V;
    } else {
      if (auto CS = CallSite(V))
        if (Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }

      // See if InstructionSimplify knows any relevant tricks.
      if (Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Acquire a DominatorTree and AssumptionCache and use them.
        if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
          V = Simplified;
          continue;
        }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}

void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
                                const DataLayout &DL, LoopInfo *LI,
                                unsigned MaxLookup) {
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, DL, MaxLookup);

    if (!Visited.insert(P).second)
      continue;

    if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(P)) {
      // If this PHI changes the underlying object in every iteration of the
      // loop, don't look through it. Consider:
      //   int **A;
      //   for (i) {
      //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
      //     Curr = A[i];
      //     *Prev, *Curr;
      //
      // Prev is tracking Curr one iteration behind so they refer to different
      // underlying objects.
      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
          isSameUnderlyingObjectInLoop(PN, LI))
        for (Value *IncValue : PN->incoming_values())
          Worklist.push_back(IncValue);
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}

/// This is the function that does the work of looking through basic
/// ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can hand the pointer operand back to the
      // regular pointer-based GetUnderlyingObjects walk.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
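      // For example (illustrative IR):
      //   %i = ptrtoint i8* %obj to i64
      //   %a = add i64 %i, 16
      //   %p = inttoptr i64 %a to i8*
      // Walking from %a through the first add operand leads back to %obj.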
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (true);
}

/// This is a wrapper around GetUnderlyingObjects that adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences. It returns false if an
/// unidentified object is found in GetUnderlyingObjects.
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
                                          SmallVectorImpl<Value *> &Objects,
                                          const DataLayout &DL) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<Value *, 4> Objs;
    GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);

    for (Value *V : Objs) {
      if (!Visited.insert(V).second)
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
            getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      // If GetUnderlyingObjects fails to find an identifiable object,
      // getUnderlyingObjectsForCodeGen also fails for safety.
      if (!isIdentifiedObject(V)) {
        Objects.clear();
        return false;
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
  return true;
}

/// Return true if the only users of this pointer are lifetime markers.
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (const User *U : V->users()) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II) return false;

    if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
        II->getIntrinsicID() != Intrinsic::lifetime_end)
      return false;
  }
  return true;
}

bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const Instruction *CtxI,
                                        const DominatorTree *DT) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem: {
    // x / y is undefined if y == 0.
    const APInt *V;
    if (match(Inst->getOperand(1), m_APInt(V)))
      return *V != 0;
    return false;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is neither 0 nor -1.
    if (*Denominator != -1)
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist as
    // long as we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    // The numerator *might* be MinSignedValue.
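    // e.g. (illustrative) for i8, -128 / -1 would be +128, which is not
    // representable, so we must be conservative.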
    return false;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (!LI->isUnordered() ||
        // Speculative load may create a race that did not exist in the source.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
        // Speculative load may load data from dirty regions.
        LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress))
      return false;
    const DataLayout &DL = LI->getModule()->getDataLayout();
    return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
                                              LI->getAlignment(), DL, CtxI, DT);
  }
  case Instruction::Call: {
    auto *CI = cast<const CallInst>(Inst);
    const Function *Callee = CI->getCalledFunction();

    // The called function could have undefined behavior or side-effects, even
    // if marked readnone nounwind.
    return Callee && Callee->isSpeculatable();
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::LandingPad:
  case Instruction::Resume:
  case Instruction::CatchSwitch:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
  case Instruction::CleanupPad:
  case Instruction::CleanupRet:
    return false; // Misc instructions which have effects
  }
}

bool llvm::mayBeMemoryDependent(const Instruction &I) {
  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}

OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  // Multiplying n * m significant bits yields a result of n + m significant
  // bits. If the total number of significant bits does not exceed the
  // result bit width (minus 1), there is no overflow.
  // This means if we have enough leading zero bits in the operands
  // we can guarantee that the result does not overflow.
  // Ref: "Hacker's Delight" by Henry Warren
  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  KnownBits LHSKnown(BitWidth);
  KnownBits RHSKnown(BitWidth);
  computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
  // Note that underestimating the number of zero bits gives a more
  // conservative answer.
  unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
                      RHSKnown.countMinLeadingZeros();
  // First handle the easy case: if we have enough zero bits there's
  // definitely no overflow.
  if (ZeroBits >= BitWidth)
    return OverflowResult::NeverOverflows;

  // Get the largest possible values for each operand.
  APInt LHSMax = ~LHSKnown.Zero;
  APInt RHSMax = ~RHSKnown.Zero;

  // We know the multiply operation doesn't overflow if the maximum values for
  // each operand will not overflow after we multiply them together.
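  // For example (illustrative, i8): if LHSMax == 17 and RHSMax == 13, the
  // leading-zero count alone proves nothing (3 + 4 < 8), but 17 * 13 == 221
  // still fits in 8 bits, so the multiply can never wrap.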
  bool MaxOverflow;
  (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
  if (!MaxOverflow)
    return OverflowResult::NeverOverflows;

  // We know it always overflows if multiplying the smallest possible values
  // for the operands also results in overflow.
  bool MinOverflow;
  (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
  if (MinOverflow)
    return OverflowResult::AlwaysOverflows;

  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
  if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
    KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);

    if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
      // The top bit is set in both cases: this MUST overflow.
      return OverflowResult::AlwaysOverflows;
    }

    if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
      // The top bit is clear in both cases: this CANNOT overflow.
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}

/// \brief Return true if we can prove that adding the two values of the
/// knownbits will not overflow. Otherwise return false.
static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
                                    const KnownBits &RHSKnown) {
  // Addition of two 2's complement numbers having opposite signs will never
  // overflow.
  if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
      (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
    return true;

  // If either of the values is known to be non-negative, adding them can only
  // overflow if the second is also non-negative, so we can assume that.
  // Two non-negative numbers will only overflow if there is a carry to the
  // sign bit, so we can check if even when the values are as big as possible
  // there is no overflow to the sign bit.
  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
    APInt MaxLHS = ~LHSKnown.Zero;
    MaxLHS.clearSignBit();
    APInt MaxRHS = ~RHSKnown.Zero;
    MaxRHS.clearSignBit();
    APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
    return Result.isSignBitClear();
  }

  // If either of the values is known to be negative, adding them can only
  // overflow if the second is also negative, so we can assume that.
  // Two negative numbers will only overflow if there is no carry to the sign
  // bit, so we can check if even when the values are as small as possible
  // there is overflow to the sign bit.
  if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
    APInt MinLHS = LHSKnown.One;
    MinLHS.clearSignBit();
    APInt MinRHS = RHSKnown.One;
    MinRHS.clearSignBit();
    APInt Result = std::move(MinLHS) + std::move(MinRHS);
    return Result.isSignBitSet();
  }

  // If we reached here it means that we know nothing about the sign bits.
  // In this case we can't know if there will be an overflow, since by
  // changing the sign bits any two values can be made to overflow.
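  // (For example, in i8 the operands 0x7F and 0x01 overflow while 0xFF and
  // 0x01 do not, and the two cases differ only in the first operand's sign
  // bit; illustrative.)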
  return false;
}

static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                                  const Value *RHS,
                                                  const AddOperator *Add,
                                                  const DataLayout &DL,
                                                  AssumptionCache *AC,
                                                  const Instruction *CxtI,
                                                  const DominatorTree *DT) {
  if (Add && Add->hasNoSignedWrap()) {
    return OverflowResult::NeverOverflows;
  }

  // If LHS and RHS each have at least two sign bits, the addition will look
  // like
  //
  //   XX..... +
  //   YY.....
  //
  // If the carry into the most significant position is 0, X and Y can't both
  // be 1 and therefore the carry out of the addition is also 0.
  //
  // If the carry into the most significant position is 1, X and Y can't both
  // be 0 and therefore the carry out of the addition is also 1.
  //
  // Since the carry into the most significant position is always equal to
  // the carry out of the addition, there is no signed overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);

  if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
    return OverflowResult::NeverOverflows;

  // The remaining code needs Add to be available. Return early if it is not.
  if (!Add)
    return OverflowResult::MayOverflow;

  // If the sign of Add is the same as at least one of the operands, this add
  // CANNOT overflow. This is particularly useful when the sum is
  // @llvm.assume'ed non-negative rather than proved so from analyzing its
  // operands.
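  // For example (illustrative): if the sum is @llvm.assume'ed non-negative
  // and one operand is known non-negative, then a negative overflow would
  // need both operands negative, and a positive overflow would have produced
  // a negative sum, so neither can have happened.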
  bool LHSOrRHSKnownNonNegative =
      (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
  bool LHSOrRHSKnownNegative =
      (LHSKnown.isNegative() || RHSKnown.isNegative());
  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
    KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
    if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
        (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
      return OverflowResult::NeverOverflows;
    }
  }

  return OverflowResult::MayOverflow;
}

bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
                                     const DominatorTree &DT) {
#ifndef NDEBUG
  auto IID = II->getIntrinsicID();
  assert((IID == Intrinsic::sadd_with_overflow ||
          IID == Intrinsic::uadd_with_overflow ||
          IID == Intrinsic::ssub_with_overflow ||
          IID == Intrinsic::usub_with_overflow ||
          IID == Intrinsic::smul_with_overflow ||
          IID == Intrinsic::umul_with_overflow) &&
         "Not an overflow intrinsic!");
#endif

  SmallVector<const BranchInst *, 2> GuardingBranches;
  SmallVector<const ExtractValueInst *, 2> Results;

  for (const User *U : II->users()) {
    if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");

      if (EVI->getIndices()[0] == 0)
        Results.push_back(EVI);
      else {
        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");

        for (const auto *U : EVI->users())
          if (const auto *B = dyn_cast<BranchInst>(U)) {
            assert(B->isConditional() && "How else is it using an i1?");
            GuardingBranches.push_back(B);
          }
      }
    } else {
      // We are using the aggregate directly in a way we don't want to analyze
      // here (storing it to a global, say).
      return false;
    }
  }

  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
    if (!NoWrapEdge.isSingleEdge())
      return false;

    // Check if all users of the add are provably no-wrap.
    for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
      // need to check each use separately, since domination is transitive.
      if (DT.dominates(NoWrapEdge, Result->getParent()))
        continue;

      for (auto &RU : Result->uses())
        if (!DT.dominates(NoWrapEdge, RU))
          return false;
    }

    return true;
  };

  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
}

OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, DL, AC, CxtI, DT);
}

OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
  // A memory operation returns normally if it isn't volatile. A volatile
  // operation is allowed to trap.
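  // (e.g. a volatile load may deliberately target a memory-mapped I/O
  // address that faults; illustrative.)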
  //
  // An atomic operation isn't guaranteed to return in a reasonable amount of
  // time because it's possible for another thread to interfere with it for an
  // arbitrary length of time, but programs aren't allowed to rely on that.
  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return !LI->isVolatile();
  if (const StoreInst *SI = dyn_cast<StoreInst>(I))
    return !SI->isVolatile();
  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return !CXI->isVolatile();
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
    return !RMWI->isVolatile();
  if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
    return !MII->isVolatile();

  // If there is no successor, then execution can't transfer to it.
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
    return !CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
    return !CatchSwitch->unwindsToCaller();
  if (isa<ResumeInst>(I))
    return false;
  if (isa<ReturnInst>(I))
    return false;
  if (isa<UnreachableInst>(I))
    return false;

  // Calls can throw, or contain an infinite loop, or kill the process.
  if (auto CS = ImmutableCallSite(I)) {
    // Call sites that throw have implicit non-local control flow.
    if (!CS.doesNotThrow())
      return false;

    // Non-throwing call sites can loop infinitely, call exit/pthread_exit
    // etc. and thus not return. However, LLVM already assumes that
    //
    //  - Thread exiting actions are modeled as writes to memory invisible to
    //    the program.
    //
    //  - Loops that don't have side effects (side effects are volatile/atomic
    //    stores and IO) always terminate (see http://llvm.org/PR965).
    //    Furthermore IO itself is also modeled as writes to memory invisible
    //    to the program.
    //
    // We rely on those assumptions here, and use the memory effects of the
    // call target as a proxy for checking that it always returns.

    // FIXME: This isn't aggressive enough; a call which only writes to a
    // global is guaranteed to return.
    return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
           match(I, m_Intrinsic<Intrinsic::assume>());
  }

  // Other instructions return normally.
  return true;
}

bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
  // The loop header is guaranteed to be executed for every iteration.
  //
  // FIXME: Relax this constraint to cover all basic blocks that are
  // guaranteed to be executed at every iteration.
  if (I->getParent() != L->getHeader()) return false;

  for (const Instruction &LI : *L->getHeader()) {
    if (&LI == I) return true;
    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
  }
  llvm_unreachable("Instruction not contained in its own parent basic block.");
}

bool llvm::propagatesFullPoison(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Xor:
  case Instruction::Trunc:
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::GetElementPtr:
    // These operations all propagate poison unconditionally. Note that poison
    // is not any particular value, so xor or subtraction of poison with
    // itself still yields poison, not zero.
    return true;

  case Instruction::AShr:
  case Instruction::SExt:
    // For these operations, one bit of the input is replicated across
    // multiple output bits. A replicated poison bit is still poison.
    return true;

  case Instruction::ICmp:
    // Comparing poison with any value yields poison. This is why, for
    // instance, x s< (x +nsw 1) can be folded to true.
    return true;

  default:
    return false;
  }
}

const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Store:
    return cast<StoreInst>(I)->getPointerOperand();

  case Instruction::Load:
    return cast<LoadInst>(I)->getPointerOperand();

  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->getPointerOperand();

  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->getPointerOperand();

  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    return I->getOperand(1);

  default:
    return nullptr;
  }
}

bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
  // We currently only look for uses of poison values within the same basic
  // block, as that makes it easier to guarantee that the uses will be
  // executed given that PoisonI is executed.
  //
  // FIXME: Expand this to consider uses beyond the same basic block. To do
  // this, look out for the distinction between post-dominance and strong
  // post-dominance.
  const BasicBlock *BB = PoisonI->getParent();

  // Set of instructions that we have proved will yield poison if PoisonI
  // does.
  SmallSet<const Value *, 16> YieldsPoison;
  SmallSet<const BasicBlock *, 4> Visited;
  YieldsPoison.insert(PoisonI);
  Visited.insert(PoisonI->getParent());

  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();

  unsigned Iter = 0;
  while (Iter++ < MaxDepth) {
    for (auto &I : make_range(Begin, End)) {
      if (&I != PoisonI) {
        const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
        if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
          return true;
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          return false;
      }

      // Mark poison that propagates from I through uses of I.
      if (YieldsPoison.count(&I)) {
        for (const User *User : I.users()) {
          const Instruction *UserI = cast<Instruction>(User);
          if (propagatesFullPoison(UserI))
            YieldsPoison.insert(User);
        }
      }
    }

    if (auto *NextBB = BB->getSingleSuccessor()) {
      if (Visited.insert(NextBB).second) {
        BB = NextBB;
        Begin = BB->getFirstNonPHI()->getIterator();
        End = BB->end();
        continue;
      }
    }

    break;
  }
  return false;
}

static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();
  return false;
}

static bool isKnownNonZero(const Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();
  return false;
}

/// Match clamp pattern for float types, ignoring NaNs and signed zeros.
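/// For example (illustrative): X < 1.0 ? 1.0 : fmin(X, 255.0) can be treated
/// as fmax(1.0, fmin(X, 255.0)).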
/// Given the non-min/max outer cmp/select from the clamp pattern, this
/// function recognizes if it can be substituted by a "canonical" min/max
/// pattern.
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TrueVal, Value *FalseVal,
                                               Value *&LHS, Value *&RHS) {
  // Try to match
  //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
  //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
  // and return the description of the outer Max/Min.

  // First, check if select has inverse order:
  if (CmpRHS == FalseVal) {
    std::swap(TrueVal, FalseVal);
    Pred = CmpInst::getInversePredicate(Pred);
  }

  // Assume success now. If there's no match, callers should not use these
  // anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  const APFloat *FC1;
  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
    return {SPF_UNKNOWN, SPNB_NA, false};

  const APFloat *FC2;
  switch (Pred) {
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
      return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
    break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
      return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
    break;
  default:
    break;
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS) {
  // Assume success. If there's no match, callers should not use these anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  // Recognize variations of:
  //   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
  const APInt *C1;
  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
    const APInt *C2;

    // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
    if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
      return {SPF_SMAX, SPNB_NA, false};

    // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
    if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
      return {SPF_SMIN, SPNB_NA, false};

    // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
    if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
      return {SPF_UMAX, SPNB_NA, false};

    // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
    if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
      return {SPF_UMIN, SPNB_NA, false};
  }

  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
  if (match(TrueVal, m_Zero()) &&
      match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // Z = X -nsw Y
  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
  if (match(FalseVal, m_Zero()) &&
      match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  if (!match(CmpRHS, m_APInt(C1)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  // An unsigned min/max can be written with a signed compare.
  const APInt *C2;
  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
      (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
    // Is the sign bit set?
    // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
    // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
    if (Pred == CmpInst::ICMP_SLT && *C1 == 0 && C2->isMaxSignedValue())
      return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};

    // Is the sign bit clear?
    // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
    // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
    if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
        C2->isMinSignedValue())
      return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  // Look through 'not' ops to find disguised signed min/max.
  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
      match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};

  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
      match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
    return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};

  return {SPF_UNKNOWN, SPNB_NA, false};
}

static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS) {
  LHS = CmpLHS;
  RHS = CmpRHS;

  // If the predicate is an "or-equal" (FP) predicate, then signed zeroes may
  // return inconsistent results between implementations.
  //   (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //   minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore we behave conservatively and only proceed if at least one of the
  // operands is known to not be zero, or if we don't care about signed zeroes.
  switch (Pred) {
  default: break;
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //  - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //  - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //    ordered comparison fails), which could be NaN or non-NaN.
  // So here we discover exactly what NaN behavior is required/accepted.
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
      NaNBehavior = SPNB_RETURNS_ANY;
    } else if (CmpInst::isOrdered(Pred)) {
      // An ordered comparison will return false when given a NaN, so it
      // returns the RHS.
      Ordered = true;
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
        NaNBehavior = SPNB_RETURNS_NAN;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_OTHER;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    } else {
      Ordered = false;
      // An unordered comparison will return true when given a NaN, so it
      // returns the LHS.
      if (LHSSafe)
        // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
        NaNBehavior = SPNB_RETURNS_OTHER;
      else if (RHSSafe)
        NaNBehavior = SPNB_RETURNS_NAN;
      else
        // Completely unsafe.
        return {SPF_UNKNOWN, SPNB_NA, false};
    }
  }

  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
    std::swap(CmpLHS, CmpRHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
    if (NaNBehavior == SPNB_RETURNS_NAN)
      NaNBehavior = SPNB_RETURNS_OTHER;
    else if (NaNBehavior == SPNB_RETURNS_OTHER)
      NaNBehavior = SPNB_RETURNS_NAN;
    Ordered = !Ordered;
  }

  // ([if]cmp X, Y) ? X : Y
  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
    switch (Pred) {
    default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
    case FCmpInst::FCMP_UGT:
    case FCmpInst::FCMP_UGE:
    case FCmpInst::FCMP_OGT:
    case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
    case FCmpInst::FCMP_ULT:
    case FCmpInst::FCMP_ULE:
    case FCmpInst::FCMP_OLT:
    case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
    }
  }

  const APInt *C1;
  if (match(CmpRHS, m_APInt(C1))) {
    if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
        (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {

      // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
      // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
      if (Pred == ICmpInst::ICMP_SGT && (*C1 == 0 || C1->isAllOnesValue())) {
        return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }

      // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
      // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
      if (Pred == ICmpInst::ICMP_SLT && (*C1 == 0 || *C1 == 1)) {
        return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
      }
    }
  }

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);

  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such cases.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}

/// Helps to match a select pattern in case of a type mismatch.
///
/// The function processes the case when the types of the true and false
/// values of a select instruction differ from the type of the cmp instruction
/// operands because of a cast instruction. The function checks if it is legal
/// to move the cast operation after the "select". If yes, it returns the new
/// second value of the "select" (with the assumption that the cast is moved):
/// 1. As the operand of the cast instruction, when both values of the
///    "select" are the same cast instruction.
/// 2. As the restored constant (by applying the reverse cast operation), when
///    the first value of the "select" is a cast operation and the second
///    value is a constant.
/// NOTE: We return only the new second value because the first value can be
/// accessed as the operand of the cast instruction.
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move the trunc after the select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // upper bits after truncation. It can't be an abs pattern, because that
      // would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only the min/max pattern can be matched. Such a match requires
      // the widened C to equal CmpConst. That is why we set the widened C to
      // CmpConst here; the condition 'trunc CmpConst == C' is checked below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
  Constant *CastedBack =
      ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
  if (CastedBack != C)
    return nullptr;

  return CastedTo;
}

SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp) {
  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS);
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS);
  }
  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
                              LHS, RHS);
}

/// Return true if "icmp Pred LHS RHS" is always true.
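/// For example (illustrative IR): with %y = add nuw i32 %x, 42,
/// 'icmp ule %x, %y' is always true, per the ICMP_ULE case below.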
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS, const DataLayout &DL,
                            unsigned Depth) {
  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
    return true;

  switch (Pred) {
  default:
    return false;

  case CmpInst::ICMP_SLE: {
    const APInt *C;

    // LHS s<= LHS +_{nsw} C   if C >= 0
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();
    return false;
  }

  case CmpInst::ICMP_ULE: {
    const APInt *C;

    // LHS u<= LHS +_{nuw} C   for any C
    if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
      return true;

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
                                       const Value *&X,
                                       const APInt *&CA, const APInt *&CB) {
      if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
          match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
        return true;

      // If X & C == 0 then (X | C) == X +_{nuw} C
      if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
          match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
        KnownBits Known(CA->getBitWidth());
        computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
                         /*CxtI*/ nullptr, /*DT*/ nullptr);
        if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
          return true;
      }

      return false;
    };

    const Value *X;
    const APInt *CLHS, *CRHS;
    if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true. Otherwise, return None.
static Optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS,
                      const DataLayout &DL, unsigned Depth) {
  switch (Pred) {
  default:
    return None;

  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SLE:
    if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
      return true;
    return None;

  case CmpInst::ICMP_ULT:
  case CmpInst::ICMP_ULE:
    if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
        isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
      return true;
    return None;
  }
}

/// Return true if the operands of the two compares match. IsSwappedOps is true
/// when the operands match, but are swapped.
static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
                          const Value *BLHS, const Value *BRHS,
                          bool &IsSwappedOps) {
  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
  return IsMatchingOps || IsSwappedOps;
}

/// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
/// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
/// BRHS" is false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
                                                    const Value *ALHS,
                                                    const Value *ARHS,
                                                    CmpInst::Predicate BPred,
                                                    const Value *BLHS,
                                                    const Value *BRHS,
                                                    bool IsSwappedOps) {
  // Canonicalize the operands so they're matching.
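  // e.g. (illustrative) if A is 'icmp slt %a, %b' and B is 'icmp sgt %b, %a',
  // swapping B's operands rewrites it as 'icmp slt %a, %b', matching A.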
  if (IsSwappedOps) {
    std::swap(BLHS, BRHS);
    BPred = ICmpInst::getSwappedPredicate(BPred);
  }
  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
/// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
/// C2" is false. Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const Value *BLHS, const ConstantInt *C2) {
  assert(ALHS == BLHS && "LHS operands must match.");
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);
  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
  ICmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  Value *BLHS = RHS->getOperand(0);
  Value *BRHS = RHS->getOperand(1);
  ICmpInst::Predicate BPred = RHS->getPredicate();

  // Can we infer anything when the two compares have matching operands?
  bool IsSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
            cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
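/// For example (illustrative): if LHS is '%c = and i1 %p, %q' and %c is known
/// true, then both %p and %q are true, so either leg may establish RHS.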
static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
                                         const ICmpInst *RHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication =
            isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication =
            isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (LHS->getType() != RHS->getType())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (LHSCmp && RHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
  // an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO && RHSCmp) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
  }
  return None;
}