//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
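/// For example (a sketch of expected results, assuming a DataLayout with
/// 64-bit pointers):
///   i32       -> 32
///   <4 x i16> -> 16 (element bitwidth)
///   i8*       -> 64 (via DL.getPointerTypeSizeInBits)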
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  // The length of scalable vectors is unknown at compile time, thus we
  // cannot check their values.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return false;

  int NumElts =
      cast<VectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  int NumMaskElts = Shuf->getType()->getNumElements();
  DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
  if (DemandedElts.isNullValue())
    return true;
  // Simple case of a shuffle with zeroinitializer.
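  // For example (illustrative IR), in
  //   %s = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> zeroinitializer
  // every result element is element 0 of %a, so only bit 0 of DemandedLHS
  // needs to be set, regardless of which result elements are demanded.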
  if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }
  for (int i = 0; i != NumMaskElts; ++i) {
    if (!DemandedElts[i])
      continue;
    int M = Shuf->getMaskValue(i);
    assert(M < (NumElts * 2) && "Invalid shuffle mask constant");

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M == -1)
      return false;
    if (M < NumElts)
      DemandedLHS.setBit(M % NumElts);
    else
      DemandedRHS.setBit(M % NumElts);
  }

  return true;
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType())) {
    Known.resetAll();
    return;
  }

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
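  // For example (illustrative IR), with %notm = xor i8 %m, -1:
  //   %lhs = and i8 %x, %notm
  //   %rhs = and i8 %y, %m
  // %lhs and %rhs select disjoint bit positions, so no bit can be set in both.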
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
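  // For example (illustrative IR), %v = or i8 %x, 1 always has bit 0 set, so
  // it is known non-zero and is known positive exactly when its sign bit is
  // also known zero.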
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType()))
    return 1;

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
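      // Note that this relies on NSW; e.g. without it, mul i8 12, 12 wraps
      // 144 to the negative value -112.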
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                                Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on the number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  // -------
  //    XX11
  //   XX11
  //  XX11
  // XX11
  // -------
  // XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  // =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
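  // Continuing the i8 example above (a = XXXX1100, b = XXXX1110):
  // TrailBitsKnown0 = 4 with TrailZero0 = 2, and TrailBitsKnown1 = 4 with
  // TrailZero1 = 1, so TrailZ = 3, SmallestOperand = min(4 - 2, 4 - 1) = 2,
  // and ResultBitsKnown = min(2 + 3, 8) = 5, matching the 5 known low bits
  // derived above.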
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) is first
    // in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    for (BasicBlock::const_iterator I(CxtI), IE(Inv); I != IE; ++I)
      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
        return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  auto CmpExcludesZero = [V](ICmpInst *Cmp) {
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    Value *RHS;
    CmpInst::Predicate Pred;
    if (!match(Cmp, m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;
    // assume(v u> y) -> assume(v != 0)
    if (Pred == ICmpInst::ICMP_UGT)
      return true;

    // assume(v != 0)
    // We special-case this one to ensure that we handle `assume(v != null)`.
    if (Pred == ICmpInst::ICMP_NE)
      return match(RHS, m_Zero());

    // All other predicates - rely on generic ConstantRange handling.
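    // For example (illustrative), assume(icmp sgt i8 %v, 5) yields the
    // allowed region [6, 127] for %v, which excludes 0, so %v must be
    // non-zero.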
    ConstantInt *CI;
    if (!match(RHS, m_ConstantInt(CI)))
      return false;
    ConstantRange RHSRange(CI->getValue());
    ConstantRange TrueValues =
        ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
    return !TrueValues.contains(APInt::getNullValue(CI->getBitWidth()));
  };

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);
    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    if (CmpExcludesZero(Cmp) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    // Note that ptrtoint may change the bitwidth.
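    // For example (illustrative IR), with 64-bit pointers,
    //   %t = ptrtoint i8* %v to i32 ; assume(icmp eq i32 %t, %a)
    // constrains only the low 32 bits of %v, which is why the RHS query
    // results below are passed through anyextOrTrunc(BitWidth).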
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
        // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One |= RHSKnown.One & MaskKnown.One;
        // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & MaskKnown.One;
        Known.One |= RHSKnown.Zero & MaskKnown.One;
        // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V. For those bits in B that are known to
        // be one, we can propagate inverted known bits from the RHS to V.
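        // E.g. if b is known to be exactly 0b0011, then v == a ^ 0b0011, so
        // the known top bits of a carry over unchanged and the known low two
        // bits carry over inverted.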
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        Known.Zero |= RHSKnown.One & BKnown.One;
        Known.One |= RHSKnown.Zero & BKnown.One;
        // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One |= RHSKnown.One & BKnown.One;
        // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One |= RHSKnown.One;
        // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One |= RHSKnown.Zero;
        // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One |= RHSKnown.One << C;
        // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One << C;
        Known.One |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
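      // E.g. assume(v >_s -1) pins the sign bit of v to zero, which is what
      // the makeNonNegative() call below records.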
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, Query(Q, I)).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // Whatever high bits in c are zero are known to be zero.
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth+1, Query(Q, I)).anyextOrTrunc(BitWidth);

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a
        // power of 2, then one more).
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is
/// a pre-allocated temporary with the same bit width as Known.
/// KZF and KOF are operator-specific functions that, given the known-zero or
/// known-one bits respectively, and a shift amount, compute the implied
/// known-zero or known-one bits of the shift operator's result respectively
/// for that shift amount. The results from calling KZF and KOF are
/// conservatively combined for all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
  if (Known.isConstant()) {
    unsigned ShiftAmt = Known.getConstant().getLimitedValue(BitWidth - 1);

    computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (Known.getMaxValue().uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
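    // E.g. for %s = shl i8 %x, %amt with %amt known non-zero, skipping
    // ShiftAmt == 0 here avoids intersecting with the unshifted value and
    // lets us conclude that bit 0 of %s is zero.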
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known &= Known2;

    // and(x, add(x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known |= Known2;
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known ^= Known2;
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
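    // E.g. for udiv i8 %x, %y with %x known < 64 (at least 2 leading zeros)
    // and %y known >= 8 (at most 4 leading zeros), the quotient is at most
    // 63/8 = 7, i.e. it has at least 2 + (8 - 4 - 1) = 5 leading zero bits.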
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    } else if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        MaxHighZeros = 1;
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has the "nsw" flag, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 &&
               "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If the LHS's sign bit is known zero, the result's sign bit is zero too.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
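    // E.g. in urem i8 %x, %y, knowing either %x < 32 or %y < 32 bounds the
    // result below 32, so the top 3 bits of the result are known zero.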
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (TrailZ == 0)
        break;

      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle the case when the index is a vector zeroinitializer.
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy).getKnownMinSize();
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Instruction *RInst = P->getIncomingBlock(!i)->getTerminator();
        Instruction *LInst = P->getIncomingBlock(i)->getTerminator();
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
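        // For example (illustrative IR), in the canonical strided recurrence
        //   %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
        //   %iv.next = add i64 %iv, 8
        // both the start value and the step have at least 3 trailing zero
        // bits, so %iv has its low 3 bits known zero.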
1526 if (Opcode == Instruction::Add ||
1527 Opcode == Instruction::Sub ||
1528 Opcode == Instruction::And ||
1529 Opcode == Instruction::Or ||
1530 Opcode == Instruction::Mul) {
1531 Value *LL = LU->getOperand(0);
1532 Value *LR = LU->getOperand(1);
1533 // Find a recurrence.
1534 if (LL == I)
1535 L = LR;
1536 else if (LR == I)
1537 L = LL;
1538 else
1539 continue; // Check for recurrence with L and R flipped.
1540 
1541 // Change the context instruction to the "edge" that flows into the
1542 // phi. This is important because that is where the value is actually
1543 // "evaluated" even though it is used later somewhere else. (see also
1544 // D69571).
1545 Query RecQ = Q;
1546 
1547 // Ok, we have a PHI of the form L op= R. Check for low
1548 // zero bits.
1549 RecQ.CxtI = RInst;
1550 computeKnownBits(R, Known2, Depth + 1, RecQ);
1551 
1552 // We need to take the minimum number of known bits.
1553 KnownBits Known3(BitWidth);
1554 RecQ.CxtI = LInst;
1555 computeKnownBits(L, Known3, Depth + 1, RecQ);
1556 
1557 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1558 Known3.countMinTrailingZeros()));
1559 
1560 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
1561 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1562 // If the initial value of the recurrence is nonnegative and we are
1563 // adding a nonnegative number with nsw, the result can only be
1564 // nonnegative or poison, regardless of the number of times we execute
1565 // the add in the phi recurrence. If the initial value is negative and
1566 // we are adding a negative number with nsw, the result can only be
1567 // negative or poison. Similar arguments apply to sub and mul.
1568 //
1569 // (add non-negative, non-negative) --> non-negative
1570 // (add negative, negative) --> negative
1571 if (Opcode == Instruction::Add) {
1572 if (Known2.isNonNegative() && Known3.isNonNegative())
1573 Known.makeNonNegative();
1574 else if (Known2.isNegative() && Known3.isNegative())
1575 Known.makeNegative();
1576 }
1577 
1578 // (sub nsw non-negative, negative) --> non-negative
1579 // (sub nsw negative, non-negative) --> negative
1580 else if (Opcode == Instruction::Sub && LL == I) {
1581 if (Known2.isNonNegative() && Known3.isNegative())
1582 Known.makeNonNegative();
1583 else if (Known2.isNegative() && Known3.isNonNegative())
1584 Known.makeNegative();
1585 }
1586 
1587 // (mul nsw non-negative, non-negative) --> non-negative
1588 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1589 Known3.isNonNegative())
1590 Known.makeNonNegative();
1591 }
1592 
1593 break;
1594 }
1595 }
1596 }
1597 
1598 // Unreachable blocks may have zero-operand PHI nodes.
1599 if (P->getNumIncomingValues() == 0)
1600 break;
1601 
1602 // Otherwise take the unions of the known bit sets of the operands,
1603 // taking conservative care to avoid excessive recursion.
1604 if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
1605 // Skip if every incoming value refers back to this PHI.
1606 if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
1607 break;
1608 
1609 Known.Zero.setAllBits();
1610 Known.One.setAllBits();
1611 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1612 Value *IncValue = P->getIncomingValue(u);
1613 // Skip direct self references.
1614 if (IncValue == P) continue;
1615 
1616 // Change the context instruction to the "edge" that flows into the
1617 // phi. This is important because that is where the value is actually
1618 // "evaluated" even though it is used later somewhere else.
(see also 1619 // D69571). 1620 Query RecQ = Q; 1621 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator(); 1622 1623 Known2 = KnownBits(BitWidth); 1624 // Recurse, but cap the recursion to one level, because we don't 1625 // want to waste time spinning around in loops. 1626 computeKnownBits(IncValue, Known2, MaxDepth - 1, RecQ); 1627 Known.Zero &= Known2.Zero; 1628 Known.One &= Known2.One; 1629 // If all bits have been ruled out, there's no need to check 1630 // more operands. 1631 if (!Known.Zero && !Known.One) 1632 break; 1633 } 1634 } 1635 break; 1636 } 1637 case Instruction::Call: 1638 case Instruction::Invoke: 1639 // If range metadata is attached to this call, set known bits from that, 1640 // and then intersect with known bits based on other properties of the 1641 // function. 1642 if (MDNode *MD = 1643 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range)) 1644 computeKnownBitsFromRangeMetadata(*MD, Known); 1645 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) { 1646 computeKnownBits(RV, Known2, Depth + 1, Q); 1647 Known.Zero |= Known2.Zero; 1648 Known.One |= Known2.One; 1649 } 1650 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) { 1651 switch (II->getIntrinsicID()) { 1652 default: break; 1653 case Intrinsic::bitreverse: 1654 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); 1655 Known.Zero |= Known2.Zero.reverseBits(); 1656 Known.One |= Known2.One.reverseBits(); 1657 break; 1658 case Intrinsic::bswap: 1659 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); 1660 Known.Zero |= Known2.Zero.byteSwap(); 1661 Known.One |= Known2.One.byteSwap(); 1662 break; 1663 case Intrinsic::ctlz: { 1664 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1665 // If we have a known 1, its position is our upper bound. 1666 unsigned PossibleLZ = Known2.One.countLeadingZeros(); 1667 // If this call is undefined for 0, the result will be less than 2^n. 1668 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) 1669 PossibleLZ = std::min(PossibleLZ, BitWidth - 1); 1670 unsigned LowBits = Log2_32(PossibleLZ)+1; 1671 Known.Zero.setBitsFrom(LowBits); 1672 break; 1673 } 1674 case Intrinsic::cttz: { 1675 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1676 // If we have a known 1, its position is our upper bound. 1677 unsigned PossibleTZ = Known2.One.countTrailingZeros(); 1678 // If this call is undefined for 0, the result will be less than 2^n. 1679 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) 1680 PossibleTZ = std::min(PossibleTZ, BitWidth - 1); 1681 unsigned LowBits = Log2_32(PossibleTZ)+1; 1682 Known.Zero.setBitsFrom(LowBits); 1683 break; 1684 } 1685 case Intrinsic::ctpop: { 1686 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1687 // We can bound the space the count needs. Also, bits known to be zero 1688 // can't contribute to the population. 1689 unsigned BitsPossiblySet = Known2.countMaxPopulation(); 1690 unsigned LowBits = Log2_32(BitsPossiblySet)+1; 1691 Known.Zero.setBitsFrom(LowBits); 1692 // TODO: we could bound KnownOne using the lower bound on the number 1693 // of bits which might be set provided by popcnt KnownOne2. 1694 break; 1695 } 1696 case Intrinsic::fshr: 1697 case Intrinsic::fshl: { 1698 const APInt *SA; 1699 if (!match(I->getOperand(2), m_APInt(SA))) 1700 break; 1701 1702 // Normalize to funnel shift left. 
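// fshr(X, Y, C) computes the same value as fshl(X, Y, BitWidth - C) when
// C % BitWidth != 0, so both intrinsics share the logic below.
// Illustrative i8 example: fshl(X, Y, 3) == (X << 3) | (Y lshr 5), i.e.
// the low 5 bits of X sit above the high 3 bits of Y.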
1703 uint64_t ShiftAmt = SA->urem(BitWidth); 1704 if (II->getIntrinsicID() == Intrinsic::fshr) 1705 ShiftAmt = BitWidth - ShiftAmt; 1706 1707 KnownBits Known3(BitWidth); 1708 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1709 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q); 1710 1711 Known.Zero = 1712 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt); 1713 Known.One = 1714 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt); 1715 break; 1716 } 1717 case Intrinsic::uadd_sat: 1718 case Intrinsic::usub_sat: { 1719 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat; 1720 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1721 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1722 1723 // Add: Leading ones of either operand are preserved. 1724 // Sub: Leading zeros of LHS and leading ones of RHS are preserved 1725 // as leading zeros in the result. 1726 unsigned LeadingKnown; 1727 if (IsAdd) 1728 LeadingKnown = std::max(Known.countMinLeadingOnes(), 1729 Known2.countMinLeadingOnes()); 1730 else 1731 LeadingKnown = std::max(Known.countMinLeadingZeros(), 1732 Known2.countMinLeadingOnes()); 1733 1734 Known = KnownBits::computeForAddSub( 1735 IsAdd, /* NSW */ false, Known, Known2); 1736 1737 // We select between the operation result and all-ones/zero 1738 // respectively, so we can preserve known ones/zeros. 1739 if (IsAdd) { 1740 Known.One.setHighBits(LeadingKnown); 1741 Known.Zero.clearAllBits(); 1742 } else { 1743 Known.Zero.setHighBits(LeadingKnown); 1744 Known.One.clearAllBits(); 1745 } 1746 break; 1747 } 1748 case Intrinsic::x86_sse42_crc32_64_64: 1749 Known.Zero.setBitsFrom(32); 1750 break; 1751 } 1752 } 1753 break; 1754 case Instruction::ShuffleVector: { 1755 auto *Shuf = dyn_cast<ShuffleVectorInst>(I); 1756 // FIXME: Do we need to handle ConstantExpr involving shufflevectors? 1757 if (!Shuf) { 1758 Known.resetAll(); 1759 return; 1760 } 1761 // For undef elements, we don't know anything about the common state of 1762 // the shuffle result. 1763 APInt DemandedLHS, DemandedRHS; 1764 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) { 1765 Known.resetAll(); 1766 return; 1767 } 1768 Known.One.setAllBits(); 1769 Known.Zero.setAllBits(); 1770 if (!!DemandedLHS) { 1771 const Value *LHS = Shuf->getOperand(0); 1772 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q); 1773 // If we don't know any bits, early out. 1774 if (Known.isUnknown()) 1775 break; 1776 } 1777 if (!!DemandedRHS) { 1778 const Value *RHS = Shuf->getOperand(1); 1779 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q); 1780 Known.One &= Known2.One; 1781 Known.Zero &= Known2.Zero; 1782 } 1783 break; 1784 } 1785 case Instruction::InsertElement: { 1786 const Value *Vec = I->getOperand(0); 1787 const Value *Elt = I->getOperand(1); 1788 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2)); 1789 // Early out if the index is non-constant or out-of-range. 1790 unsigned NumElts = DemandedElts.getBitWidth(); 1791 if (!CIdx || CIdx->getValue().uge(NumElts)) { 1792 Known.resetAll(); 1793 return; 1794 } 1795 Known.One.setAllBits(); 1796 Known.Zero.setAllBits(); 1797 unsigned EltIdx = CIdx->getZExtValue(); 1798 // Do we demand the inserted element? 1799 if (DemandedElts[EltIdx]) { 1800 computeKnownBits(Elt, Known, Depth + 1, Q); 1801 // If we don't know any bits, early out. 1802 if (Known.isUnknown()) 1803 break; 1804 } 1805 // We don't need the base vector element that has been inserted. 
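// Illustrative example (hypothetical IR): for
//   %r = insertelement <4 x i8> %v, i8 %x, i32 1
// with DemandedElts == 0b1111, the recursive query on %v demands only
// 0b1101, because lane 1 of %r comes from %x rather than %v.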
1806 APInt DemandedVecElts = DemandedElts;
1807 DemandedVecElts.clearBit(EltIdx);
1808 if (!!DemandedVecElts) {
1809 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1810 Known.One &= Known2.One;
1811 Known.Zero &= Known2.Zero;
1812 }
1813 break;
1814 }
1815 case Instruction::ExtractElement: {
1816 // Look through extract element. If the index is non-constant or
1817 // out-of-range, demand all elements; otherwise just the extracted element.
1818 const Value *Vec = I->getOperand(0);
1819 const Value *Idx = I->getOperand(1);
1820 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1821 if (isa<ScalableVectorType>(Vec->getType())) {
1822 // FIXME: there's probably *something* we can do with scalable vectors
1823 Known.resetAll();
1824 break;
1825 }
1826 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1827 APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
1828 if (CIdx && CIdx->getValue().ult(NumElts))
1829 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1830 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1831 break;
1832 }
1833 case Instruction::ExtractValue:
1834 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1835 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1836 if (EVI->getNumIndices() != 1) break;
1837 if (EVI->getIndices()[0] == 0) {
1838 switch (II->getIntrinsicID()) {
1839 default: break;
1840 case Intrinsic::uadd_with_overflow:
1841 case Intrinsic::sadd_with_overflow:
1842 computeKnownBitsAddSub(true, II->getArgOperand(0),
1843 II->getArgOperand(1), false, DemandedElts,
1844 Known, Known2, Depth, Q);
1845 break;
1846 case Intrinsic::usub_with_overflow:
1847 case Intrinsic::ssub_with_overflow:
1848 computeKnownBitsAddSub(false, II->getArgOperand(0),
1849 II->getArgOperand(1), false, DemandedElts,
1850 Known, Known2, Depth, Q);
1851 break;
1852 case Intrinsic::umul_with_overflow:
1853 case Intrinsic::smul_with_overflow:
1854 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1855 DemandedElts, Known, Known2, Depth, Q);
1856 break;
1857 }
1858 }
1859 }
1860 break;
1861 }
1862 }
1863 
1864 /// Determine which bits of V are known to be either zero or one and return
1865 /// them.
1866 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1867 unsigned Depth, const Query &Q) {
1868 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1869 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1870 return Known;
1871 }
1872 
1873 /// Determine which bits of V are known to be either zero or one and return
1874 /// them.
1875 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1876 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1877 computeKnownBits(V, Known, Depth, Q);
1878 return Known;
1879 }
1880 
1881 /// Determine which bits of V are known to be either zero or one and return
1882 /// them in the Known bit set.
1883 ///
1884 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1885 /// we cannot optimize based on the assumption that it is zero without changing
1886 /// it to be an explicit zero. If we don't change it to zero, other code could
1887 /// be optimized based on the contradictory assumption that it is non-zero.
1888 /// Because instcombine aggressively folds operations with undef args anyway,
1889 /// this won't lose us code quality.
1890 ///
1891 /// This function is defined on values with integer type, values with pointer
1892 /// type, and vectors of integers. In the case
1893 /// where V is a vector, the known zero and known one values are the
1894 /// same width as the vector element, and a bit is set only if it is true
1895 /// for all of the demanded elements in the vector specified by DemandedElts.
1896 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1897 KnownBits &Known, unsigned Depth, const Query &Q) {
1898 assert(V && "No Value?");
1899 assert(Depth <= MaxDepth && "Limit Search Depth");
1900 
1901 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1902 // No demanded elts or V is a scalable vector, better to assume we don't
1903 // know anything.
1904 Known.resetAll();
1905 return;
1906 }
1907 
1908 #ifndef NDEBUG
1909 Type *Ty = V->getType();
1910 unsigned BitWidth = Known.getBitWidth();
1911 
1912 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1913 "Not integer or pointer type!");
1914 
1915 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1916 assert(
1917 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1918 "DemandedElt width should equal the fixed vector number of elements");
1919 } else {
1920 assert(DemandedElts == APInt(1, 1) &&
1921 "DemandedElt width should be 1 for scalars");
1922 }
1923 
1924 Type *ScalarTy = Ty->getScalarType();
1925 if (ScalarTy->isPointerTy()) {
1926 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1927 "V and Known should have same BitWidth");
1928 } else {
1929 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1930 "V and Known should have same BitWidth");
1931 }
1932 #endif
1933 
1934 const APInt *C;
1935 if (match(V, m_APInt(C))) {
1936 // We know all of the bits for a scalar constant or a splat vector constant!
1937 Known.One = *C;
1938 Known.Zero = ~Known.One;
1939 return;
1940 }
1941 // Null and aggregate-zero are all-zeros.
1942 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1943 Known.setAllZero();
1944 return;
1945 }
1946 // Handle a constant vector by taking the intersection of the known bits of
1947 // each element.
1948 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1949 // We know that CDV must be a vector of integers. Take the intersection of
1950 // each element.
1951 Known.Zero.setAllBits(); Known.One.setAllBits();
1952 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1953 if (!DemandedElts[i])
1954 continue;
1955 APInt Elt = CDV->getElementAsAPInt(i);
1956 Known.Zero &= ~Elt;
1957 Known.One &= Elt;
1958 }
1959 return;
1960 }
1961 
1962 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1963 // We know that CV must be a vector of integers. Take the intersection of
1964 // each element.
1965 Known.Zero.setAllBits(); Known.One.setAllBits();
1966 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1967 if (!DemandedElts[i])
1968 continue;
1969 Constant *Element = CV->getAggregateElement(i);
1970 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1971 if (!ElementCI) {
1972 Known.resetAll();
1973 return;
1974 }
1975 const APInt &Elt = ElementCI->getValue();
1976 Known.Zero &= ~Elt;
1977 Known.One &= Elt;
1978 }
1979 return;
1980 }
1981 
1982 // Start out not knowing anything.
1983 Known.resetAll();
1984 
1985 // We can't imply anything about undefs.
1986 if (isa<UndefValue>(V))
1987 return;
1988 
1989 // There's no point in looking through other users of ConstantData for
1990 // assumptions. Confirm that we've handled them all.
1991 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1992 
1993 // Limit search depth.
1994 // All recursive calls that increase depth must come after this. 1995 if (Depth == MaxDepth) 1996 return; 1997 1998 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has 1999 // the bits of its aliasee. 2000 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 2001 if (!GA->isInterposable()) 2002 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q); 2003 return; 2004 } 2005 2006 if (const Operator *I = dyn_cast<Operator>(V)) 2007 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q); 2008 2009 // Aligned pointers have trailing zeros - refine Known.Zero set 2010 if (isa<PointerType>(V->getType())) { 2011 Align Alignment = V->getPointerAlignment(Q.DL); 2012 Known.Zero.setLowBits(countTrailingZeros(Alignment.value())); 2013 } 2014 2015 // computeKnownBitsFromAssume strictly refines Known. 2016 // Therefore, we run them after computeKnownBitsFromOperator. 2017 2018 // Check whether a nearby assume intrinsic can determine some known bits. 2019 computeKnownBitsFromAssume(V, Known, Depth, Q); 2020 2021 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?"); 2022 } 2023 2024 /// Return true if the given value is known to have exactly one 2025 /// bit set when defined. For vectors return true if every element is known to 2026 /// be a power of two when defined. Supports values with integer or pointer 2027 /// types and vectors of integers. 2028 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, 2029 const Query &Q) { 2030 assert(Depth <= MaxDepth && "Limit Search Depth"); 2031 2032 // Attempt to match against constants. 2033 if (OrZero && match(V, m_Power2OrZero())) 2034 return true; 2035 if (match(V, m_Power2())) 2036 return true; 2037 2038 // 1 << X is clearly a power of two if the one is not shifted off the end. If 2039 // it is shifted off the end then the result is undefined. 2040 if (match(V, m_Shl(m_One(), m_Value()))) 2041 return true; 2042 2043 // (signmask) >>l X is clearly a power of two if the one is not shifted off 2044 // the bottom. If it is shifted off the bottom then the result is undefined. 2045 if (match(V, m_LShr(m_SignMask(), m_Value()))) 2046 return true; 2047 2048 // The remaining tests are all recursive, so bail out if we hit the limit. 2049 if (Depth++ == MaxDepth) 2050 return false; 2051 2052 Value *X = nullptr, *Y = nullptr; 2053 // A shift left or a logical shift right of a power of two is a power of two 2054 // or zero. 2055 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 2056 match(V, m_LShr(m_Value(X), m_Value())))) 2057 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 2058 2059 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 2060 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 2061 2062 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 2063 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 2064 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 2065 2066 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 2067 // A power of two and'd with anything is a power of two or zero. 2068 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 2069 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 2070 return true; 2071 // X & (-X) is always a power of two or zero. 
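// Illustrative example: for X = 0b01100, -X = 0b10100 and X & -X = 0b00100;
// the expression isolates the lowest set bit of X, or yields 0 when X == 0.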
2072 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 2073 return true; 2074 return false; 2075 } 2076 2077 // Adding a power-of-two or zero to the same power-of-two or zero yields 2078 // either the original power-of-two, a larger power-of-two or zero. 2079 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2080 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 2081 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) || 2082 Q.IIQ.hasNoSignedWrap(VOBO)) { 2083 if (match(X, m_And(m_Specific(Y), m_Value())) || 2084 match(X, m_And(m_Value(), m_Specific(Y)))) 2085 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 2086 return true; 2087 if (match(Y, m_And(m_Specific(X), m_Value())) || 2088 match(Y, m_And(m_Value(), m_Specific(X)))) 2089 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 2090 return true; 2091 2092 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 2093 KnownBits LHSBits(BitWidth); 2094 computeKnownBits(X, LHSBits, Depth, Q); 2095 2096 KnownBits RHSBits(BitWidth); 2097 computeKnownBits(Y, RHSBits, Depth, Q); 2098 // If i8 V is a power of two or zero: 2099 // ZeroBits: 1 1 1 0 1 1 1 1 2100 // ~ZeroBits: 0 0 0 1 0 0 0 0 2101 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) 2102 // If OrZero isn't set, we cannot give back a zero result. 2103 // Make sure either the LHS or RHS has a bit set. 2104 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue()) 2105 return true; 2106 } 2107 } 2108 2109 // An exact divide or right shift can only shift off zero bits, so the result 2110 // is a power of two only if the first operand is a power of two and not 2111 // copying a sign bit (sdiv int_min, 2). 2112 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) || 2113 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) { 2114 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero, 2115 Depth, Q); 2116 } 2117 2118 return false; 2119 } 2120 2121 /// Test whether a GEP's result is known to be non-null. 2122 /// 2123 /// Uses properties inherent in a GEP to try to determine whether it is known 2124 /// to be non-null. 2125 /// 2126 /// Currently this routine does not support vector GEPs. 2127 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, 2128 const Query &Q) { 2129 const Function *F = nullptr; 2130 if (const Instruction *I = dyn_cast<Instruction>(GEP)) 2131 F = I->getFunction(); 2132 2133 if (!GEP->isInBounds() || 2134 NullPointerIsDefined(F, GEP->getPointerAddressSpace())) 2135 return false; 2136 2137 // FIXME: Support vector-GEPs. 2138 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); 2139 2140 // If the base pointer is non-null, we cannot walk to a null address with an 2141 // inbounds GEP in address space zero. 2142 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q)) 2143 return true; 2144 2145 // Walk the GEP operands and see if any operand introduces a non-zero offset. 2146 // If so, then the GEP cannot produce a null pointer, as doing so would 2147 // inherently violate the inbounds contract within address space zero. 2148 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); 2149 GTI != GTE; ++GTI) { 2150 // Struct types are easy -- they must always be indexed by a constant. 
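// Illustrative example (hypothetical IR): in
//   getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 1
// a non-zero byte offset for field 1 by itself proves the result non-null,
// given the inbounds and address-space checks performed above.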
2151 if (StructType *STy = GTI.getStructTypeOrNull()) { 2152 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand()); 2153 unsigned ElementIdx = OpC->getZExtValue(); 2154 const StructLayout *SL = Q.DL.getStructLayout(STy); 2155 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); 2156 if (ElementOffset > 0) 2157 return true; 2158 continue; 2159 } 2160 2161 // If we have a zero-sized type, the index doesn't matter. Keep looping. 2162 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0) 2163 continue; 2164 2165 // Fast path the constant operand case both for efficiency and so we don't 2166 // increment Depth when just zipping down an all-constant GEP. 2167 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) { 2168 if (!OpC->isZero()) 2169 return true; 2170 continue; 2171 } 2172 2173 // We post-increment Depth here because while isKnownNonZero increments it 2174 // as well, when we pop back up that increment won't persist. We don't want 2175 // to recurse 10k times just because we have 10k GEP operands. We don't 2176 // bail completely out because we want to handle constant GEPs regardless 2177 // of depth. 2178 if (Depth++ >= MaxDepth) 2179 continue; 2180 2181 if (isKnownNonZero(GTI.getOperand(), Depth, Q)) 2182 return true; 2183 } 2184 2185 return false; 2186 } 2187 2188 static bool isKnownNonNullFromDominatingCondition(const Value *V, 2189 const Instruction *CtxI, 2190 const DominatorTree *DT) { 2191 if (isa<Constant>(V)) 2192 return false; 2193 2194 if (!CtxI || !DT) 2195 return false; 2196 2197 unsigned NumUsesExplored = 0; 2198 for (auto *U : V->users()) { 2199 // Avoid massive lists 2200 if (NumUsesExplored >= DomConditionsMaxUses) 2201 break; 2202 NumUsesExplored++; 2203 2204 // If the value is used as an argument to a call or invoke, then argument 2205 // attributes may provide an answer about null-ness. 2206 if (const auto *CB = dyn_cast<CallBase>(U)) 2207 if (auto *CalledFunc = CB->getCalledFunction()) 2208 for (const Argument &Arg : CalledFunc->args()) 2209 if (CB->getArgOperand(Arg.getArgNo()) == V && 2210 Arg.hasNonNullAttr() && DT->dominates(CB, CtxI)) 2211 return true; 2212 2213 // If the value is used as a load/store, then the pointer must be non null. 2214 if (V == getLoadStorePointerOperand(U)) { 2215 const Instruction *I = cast<Instruction>(U); 2216 if (!NullPointerIsDefined(I->getFunction(), 2217 V->getType()->getPointerAddressSpace()) && 2218 DT->dominates(I, CtxI)) 2219 return true; 2220 } 2221 2222 // Consider only compare instructions uniquely controlling a branch 2223 CmpInst::Predicate Pred; 2224 if (!match(const_cast<User *>(U), 2225 m_c_ICmp(Pred, m_Specific(V), m_Zero())) || 2226 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)) 2227 continue; 2228 2229 SmallVector<const User *, 4> WorkList; 2230 SmallPtrSet<const User *, 4> Visited; 2231 for (auto *CmpU : U->users()) { 2232 assert(WorkList.empty() && "Should be!"); 2233 if (Visited.insert(CmpU).second) 2234 WorkList.push_back(CmpU); 2235 2236 while (!WorkList.empty()) { 2237 auto *Curr = WorkList.pop_back_val(); 2238 2239 // If a user is an AND, add all its users to the work list. We only 2240 // propagate "pred != null" condition through AND because it is only 2241 // correct to assume that all conditions of AND are met in true branch. 2242 // TODO: Support similar logic of OR and EQ predicate? 
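// Illustrative example (hypothetical IR):
//   %nn  = icmp ne i8* %p, null
//   %and = and i1 %nn, %other
//   br i1 %and, label %taken, label %skip
// On the edge to %taken both conjuncts must be true, so %p is non-null
// there; nothing is implied on the false edge.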
2243 if (Pred == ICmpInst::ICMP_NE)
2244 if (auto *BO = dyn_cast<BinaryOperator>(Curr))
2245 if (BO->getOpcode() == Instruction::And) {
2246 for (auto *BOU : BO->users())
2247 if (Visited.insert(BOU).second)
2248 WorkList.push_back(BOU);
2249 continue;
2250 }
2251 
2252 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2253 assert(BI->isConditional() && "uses a comparison!");
2254 
2255 BasicBlock *NonNullSuccessor =
2256 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
2257 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2258 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2259 return true;
2260 } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
2261 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2262 return true;
2263 }
2264 }
2265 }
2266 }
2267 
2268 return false;
2269 }
2270 
2271 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2272 /// ensure that the value it's attached to is never Value? 'RangeType' is
2273 /// the type of the value described by the range.
2274 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2275 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2276 assert(NumRanges >= 1);
2277 for (unsigned i = 0; i < NumRanges; ++i) {
2278 ConstantInt *Lower =
2279 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2280 ConstantInt *Upper =
2281 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2282 ConstantRange Range(Lower->getValue(), Upper->getValue());
2283 if (Range.contains(Value))
2284 return false;
2285 }
2286 return true;
2287 }
2288 
2289 /// Return true if the given value is known to be non-zero when defined. For
2290 /// vectors, return true if every demanded element is known to be non-zero when
2291 /// defined. For pointers, if the context instruction and dominator tree are
2292 /// specified, perform context-sensitive analysis and return true if the
2293 /// pointer couldn't possibly be null at the specified instruction.
2294 /// Supports values with integer or pointer type and vectors of integers.
2295 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2296 const Query &Q) {
2297 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2298 // vector
2299 if (isa<ScalableVectorType>(V->getType()))
2300 return false;
2301 
2302 if (auto *C = dyn_cast<Constant>(V)) {
2303 if (C->isNullValue())
2304 return false;
2305 if (isa<ConstantInt>(C))
2306 // Must be non-zero due to null test above.
2307 return true;
2308 
2309 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2310 // See the comment for IntToPtr/PtrToInt instructions below.
2311 if (CE->getOpcode() == Instruction::IntToPtr ||
2312 CE->getOpcode() == Instruction::PtrToInt)
2313 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
2314 Q.DL.getTypeSizeInBits(CE->getType()))
2315 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2316 }
2317 
2318 // For constant vectors, check that all elements are undefined or known
2319 // non-zero to determine that the whole vector is known non-zero.
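// Illustrative example: <2 x i8> <i8 1, i8 undef> qualifies (undef may be
// chosen to be non-zero), while <2 x i8> <i8 1, i8 0> does not.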
2320 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) { 2321 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { 2322 if (!DemandedElts[i]) 2323 continue; 2324 Constant *Elt = C->getAggregateElement(i); 2325 if (!Elt || Elt->isNullValue()) 2326 return false; 2327 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt)) 2328 return false; 2329 } 2330 return true; 2331 } 2332 2333 // A global variable in address space 0 is non null unless extern weak 2334 // or an absolute symbol reference. Other address spaces may have null as a 2335 // valid address for a global, so we can't assume anything. 2336 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { 2337 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && 2338 GV->getType()->getAddressSpace() == 0) 2339 return true; 2340 } else 2341 return false; 2342 } 2343 2344 if (auto *I = dyn_cast<Instruction>(V)) { 2345 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) { 2346 // If the possible ranges don't contain zero, then the value is 2347 // definitely non-zero. 2348 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) { 2349 const APInt ZeroValue(Ty->getBitWidth(), 0); 2350 if (rangeMetadataExcludesValue(Ranges, ZeroValue)) 2351 return true; 2352 } 2353 } 2354 } 2355 2356 if (isKnownNonZeroFromAssume(V, Q)) 2357 return true; 2358 2359 // Some of the tests below are recursive, so bail out if we hit the limit. 2360 if (Depth++ >= MaxDepth) 2361 return false; 2362 2363 // Check for pointer simplifications. 2364 if (V->getType()->isPointerTy()) { 2365 // Alloca never returns null, malloc might. 2366 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0) 2367 return true; 2368 2369 // A byval, inalloca, or nonnull argument is never null. 2370 if (const Argument *A = dyn_cast<Argument>(V)) 2371 if (A->hasPassPointeeByValueAttr() || A->hasNonNullAttr()) 2372 return true; 2373 2374 // A Load tagged with nonnull metadata is never null. 2375 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 2376 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull)) 2377 return true; 2378 2379 if (const auto *Call = dyn_cast<CallBase>(V)) { 2380 if (Call->isReturnNonNull()) 2381 return true; 2382 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true)) 2383 return isKnownNonZero(RP, Depth, Q); 2384 } 2385 } 2386 2387 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) 2388 return true; 2389 2390 // Check for recursive pointer simplifications. 2391 if (V->getType()->isPointerTy()) { 2392 // Look through bitcast operations, GEPs, and int2ptr instructions as they 2393 // do not alter the value, or at least not the nullness property of the 2394 // value, e.g., int2ptr is allowed to zero/sign extend the value. 2395 // 2396 // Note that we have to take special care to avoid looking through 2397 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well 2398 // as casts that can alter the value, e.g., AddrSpaceCasts. 
2399 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 2400 if (isGEPKnownNonNull(GEP, Depth, Q)) 2401 return true; 2402 2403 if (auto *BCO = dyn_cast<BitCastOperator>(V)) 2404 return isKnownNonZero(BCO->getOperand(0), Depth, Q); 2405 2406 if (auto *I2P = dyn_cast<IntToPtrInst>(V)) 2407 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <= 2408 Q.DL.getTypeSizeInBits(I2P->getDestTy())) 2409 return isKnownNonZero(I2P->getOperand(0), Depth, Q); 2410 } 2411 2412 // Similar to int2ptr above, we can look through ptr2int here if the cast 2413 // is a no-op or an extend and not a truncate. 2414 if (auto *P2I = dyn_cast<PtrToIntInst>(V)) 2415 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <= 2416 Q.DL.getTypeSizeInBits(P2I->getDestTy())) 2417 return isKnownNonZero(P2I->getOperand(0), Depth, Q); 2418 2419 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); 2420 2421 // X | Y != 0 if X != 0 or Y != 0. 2422 Value *X = nullptr, *Y = nullptr; 2423 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 2424 return isKnownNonZero(X, DemandedElts, Depth, Q) || 2425 isKnownNonZero(Y, DemandedElts, Depth, Q); 2426 2427 // ext X != 0 if X != 0. 2428 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 2429 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); 2430 2431 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 2432 // if the lowest bit is shifted off the end. 2433 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) { 2434 // shl nuw can't remove any non-zero bits. 2435 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2436 if (Q.IIQ.hasNoUnsignedWrap(BO)) 2437 return isKnownNonZero(X, Depth, Q); 2438 2439 KnownBits Known(BitWidth); 2440 computeKnownBits(X, DemandedElts, Known, Depth, Q); 2441 if (Known.One[0]) 2442 return true; 2443 } 2444 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 2445 // defined if the sign bit is shifted off the end. 2446 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 2447 // shr exact can only shift out zero bits. 2448 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 2449 if (BO->isExact()) 2450 return isKnownNonZero(X, Depth, Q); 2451 2452 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q); 2453 if (Known.isNegative()) 2454 return true; 2455 2456 // If the shifter operand is a constant, and all of the bits shifted 2457 // out are known to be zero, and X is known non-zero then at least one 2458 // non-zero bit must remain. 2459 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 2460 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 2461 // Is there a known one in the portion not shifted out? 2462 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal) 2463 return true; 2464 // Are all the bits to be shifted out known zero? 2465 if (Known.countMinTrailingZeros() >= ShiftVal) 2466 return isKnownNonZero(X, DemandedElts, Depth, Q); 2467 } 2468 } 2469 // div exact can only produce a zero if the dividend is zero. 2470 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 2471 return isKnownNonZero(X, DemandedElts, Depth, Q); 2472 } 2473 // X + Y. 2474 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2475 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); 2476 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); 2477 2478 // If X and Y are both non-negative (as signed values) then their sum is not 2479 // zero unless both X and Y are zero. 
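// (The sum also cannot wrap to zero: two non-negative iN values are each
// at most 2^(N-1) - 1, so their sum is at most 2^N - 2.)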
2480 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2481 if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2482 isKnownNonZero(Y, DemandedElts, Depth, Q))
2483 return true;
2484 
2485 // If X and Y are both negative (as signed values) then their sum is not
2486 // zero unless both X and Y equal INT_MIN.
2487 if (XKnown.isNegative() && YKnown.isNegative()) {
2488 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2489 // The sign bit of X is set. If some other bit is set then X is not equal
2490 // to INT_MIN.
2491 if (XKnown.One.intersects(Mask))
2492 return true;
2493 // The sign bit of Y is set. If some other bit is set then Y is not equal
2494 // to INT_MIN.
2495 if (YKnown.One.intersects(Mask))
2496 return true;
2497 }
2498 
2499 // The sum of a non-negative number and a power of two is not zero.
2500 if (XKnown.isNonNegative() &&
2501 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2502 return true;
2503 if (YKnown.isNonNegative() &&
2504 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2505 return true;
2506 }
2507 // X * Y.
2508 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2509 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2510 // If X and Y are non-zero then so is X * Y as long as the multiplication
2511 // does not overflow.
2512 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2513 isKnownNonZero(X, DemandedElts, Depth, Q) &&
2514 isKnownNonZero(Y, DemandedElts, Depth, Q))
2515 return true;
2516 }
2517 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2518 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2519 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2520 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2521 return true;
2522 }
2523 // PHI
2524 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2525 // Try to detect a recurrence that monotonically increases from a
2526 // starting value, as these are common as induction variables.
2527 if (PN->getNumIncomingValues() == 2) {
2528 Value *Start = PN->getIncomingValue(0);
2529 Value *Induction = PN->getIncomingValue(1);
2530 if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2531 std::swap(Start, Induction);
2532 if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2533 if (!C->isZero() && !C->isNegative()) {
2534 ConstantInt *X;
2535 if (Q.IIQ.UseInstrInfo &&
2536 (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2537 match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2538 !X->isNegative())
2539 return true;
2540 }
2541 }
2542 }
2543 // Check if all incoming values are non-zero constants.
2544 bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) { 2545 return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero(); 2546 }); 2547 if (AllNonZeroConstants) 2548 return true; 2549 } 2550 // ExtractElement 2551 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) { 2552 const Value *Vec = EEI->getVectorOperand(); 2553 const Value *Idx = EEI->getIndexOperand(); 2554 auto *CIdx = dyn_cast<ConstantInt>(Idx); 2555 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); 2556 APInt DemandedVecElts = APInt::getAllOnesValue(NumElts); 2557 if (CIdx && CIdx->getValue().ult(NumElts)) 2558 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); 2559 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q); 2560 } 2561 2562 KnownBits Known(BitWidth); 2563 computeKnownBits(V, DemandedElts, Known, Depth, Q); 2564 return Known.One != 0; 2565 } 2566 2567 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) { 2568 // FIXME: We currently have no way to represent the DemandedElts of a scalable 2569 // vector 2570 if (isa<ScalableVectorType>(V->getType())) 2571 return false; 2572 2573 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); 2574 APInt DemandedElts = 2575 FVTy ? APInt::getAllOnesValue(FVTy->getNumElements()) : APInt(1, 1); 2576 return isKnownNonZero(V, DemandedElts, Depth, Q); 2577 } 2578 2579 /// Return true if V2 == V1 + X, where X is known non-zero. 2580 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { 2581 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1); 2582 if (!BO || BO->getOpcode() != Instruction::Add) 2583 return false; 2584 Value *Op = nullptr; 2585 if (V2 == BO->getOperand(0)) 2586 Op = BO->getOperand(1); 2587 else if (V2 == BO->getOperand(1)) 2588 Op = BO->getOperand(0); 2589 else 2590 return false; 2591 return isKnownNonZero(Op, 0, Q); 2592 } 2593 2594 /// Return true if it is known that V1 != V2. 2595 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) { 2596 if (V1 == V2) 2597 return false; 2598 if (V1->getType() != V2->getType()) 2599 // We can't look through casts yet. 2600 return false; 2601 if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q)) 2602 return true; 2603 2604 if (V1->getType()->isIntOrIntVectorTy()) { 2605 // Are any known bits in V1 contradictory to known bits in V2? If V1 2606 // has a known zero where V2 has a known one, they must not be equal. 2607 KnownBits Known1 = computeKnownBits(V1, 0, Q); 2608 KnownBits Known2 = computeKnownBits(V2, 0, Q); 2609 2610 if (Known1.Zero.intersects(Known2.One) || 2611 Known2.Zero.intersects(Known1.One)) 2612 return true; 2613 } 2614 return false; 2615 } 2616 2617 /// Return true if 'V & Mask' is known to be zero. We use this predicate to 2618 /// simplify operations downstream. Mask is known to be zero for bits that V 2619 /// cannot have. 2620 /// 2621 /// This function is defined on values with integer type, values with pointer 2622 /// type, and vectors of integers. In the case 2623 /// where V is a vector, the mask, known zero, and known one values are the 2624 /// same width as the vector element, and the bit is set only if it is true 2625 /// for all of the elements in the vector. 
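/// Illustrative example: with Mask == 0xFF00 this returns true exactly when
/// the high byte of a 16-bit V is known zero, e.g. for V = zext i8 %x to i16.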
2626 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth, 2627 const Query &Q) { 2628 KnownBits Known(Mask.getBitWidth()); 2629 computeKnownBits(V, Known, Depth, Q); 2630 return Mask.isSubsetOf(Known.Zero); 2631 } 2632 2633 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow). 2634 // Returns the input and lower/upper bounds. 2635 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, 2636 const APInt *&CLow, const APInt *&CHigh) { 2637 assert(isa<Operator>(Select) && 2638 cast<Operator>(Select)->getOpcode() == Instruction::Select && 2639 "Input should be a Select!"); 2640 2641 const Value *LHS = nullptr, *RHS = nullptr; 2642 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor; 2643 if (SPF != SPF_SMAX && SPF != SPF_SMIN) 2644 return false; 2645 2646 if (!match(RHS, m_APInt(CLow))) 2647 return false; 2648 2649 const Value *LHS2 = nullptr, *RHS2 = nullptr; 2650 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor; 2651 if (getInverseMinMaxFlavor(SPF) != SPF2) 2652 return false; 2653 2654 if (!match(RHS2, m_APInt(CHigh))) 2655 return false; 2656 2657 if (SPF == SPF_SMIN) 2658 std::swap(CLow, CHigh); 2659 2660 In = LHS2; 2661 return CLow->sle(*CHigh); 2662 } 2663 2664 /// For vector constants, loop over the elements and find the constant with the 2665 /// minimum number of sign bits. Return 0 if the value is not a vector constant 2666 /// or if any element was not analyzed; otherwise, return the count for the 2667 /// element with the minimum number of sign bits. 2668 static unsigned computeNumSignBitsVectorConstant(const Value *V, 2669 const APInt &DemandedElts, 2670 unsigned TyBits) { 2671 const auto *CV = dyn_cast<Constant>(V); 2672 if (!CV || !isa<FixedVectorType>(CV->getType())) 2673 return 0; 2674 2675 unsigned MinSignBits = TyBits; 2676 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements(); 2677 for (unsigned i = 0; i != NumElts; ++i) { 2678 if (!DemandedElts[i]) 2679 continue; 2680 // If we find a non-ConstantInt, bail out. 2681 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); 2682 if (!Elt) 2683 return 0; 2684 2685 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); 2686 } 2687 2688 return MinSignBits; 2689 } 2690 2691 static unsigned ComputeNumSignBitsImpl(const Value *V, 2692 const APInt &DemandedElts, 2693 unsigned Depth, const Query &Q); 2694 2695 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, 2696 unsigned Depth, const Query &Q) { 2697 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q); 2698 assert(Result > 0 && "At least one sign bit needs to be present!"); 2699 return Result; 2700 } 2701 2702 /// Return the number of times the sign bit of the register is replicated into 2703 /// the other bits. We know that at least 1 bit is always equal to the sign bit 2704 /// (itself), but other cases can give us information. For example, immediately 2705 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each 2706 /// other, so we return 3. For vectors, return the number of sign bits for the 2707 /// vector element with the minimum number of known sign bits of the demanded 2708 /// elements in the vector specified by DemandedElts. 
2709 static unsigned ComputeNumSignBitsImpl(const Value *V,
2710 const APInt &DemandedElts,
2711 unsigned Depth, const Query &Q) {
2712 Type *Ty = V->getType();
2713 
2714 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2715 // vector
2716 if (isa<ScalableVectorType>(Ty))
2717 return 1;
2718 
2719 #ifndef NDEBUG
2720 assert(Depth <= MaxDepth && "Limit Search Depth");
2721 
2722 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2723 assert(
2724 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2725 "DemandedElt width should equal the fixed vector number of elements");
2726 } else {
2727 assert(DemandedElts == APInt(1, 1) &&
2728 "DemandedElt width should be 1 for scalars");
2729 }
2730 #endif
2731 
2732 // We return the minimum number of sign bits that are guaranteed to be present
2733 // in V, so for undef we have to conservatively return 1. We don't have the
2734 // same behavior for poison though -- that's a FIXME today.
2735 
2736 Type *ScalarTy = Ty->getScalarType();
2737 unsigned TyBits = ScalarTy->isPointerTy() ?
2738 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
2739 Q.DL.getTypeSizeInBits(ScalarTy);
2740 
2741 unsigned Tmp, Tmp2;
2742 unsigned FirstAnswer = 1;
2743 
2744 // Note that ConstantInt is handled by the general computeKnownBits case
2745 // below.
2746 
2747 if (Depth == MaxDepth)
2748 return 1; // Limit search depth.
2749 
2750 if (auto *U = dyn_cast<Operator>(V)) {
2751 switch (Operator::getOpcode(V)) {
2752 default: break;
2753 case Instruction::SExt:
2754 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2755 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2756 
2757 case Instruction::SDiv: {
2758 const APInt *Denominator;
2759 // sdiv X, C -> adds log(C) sign bits.
2760 if (match(U->getOperand(1), m_APInt(Denominator))) {
2761 
2762 // Ignore non-positive denominator.
2763 if (!Denominator->isStrictlyPositive())
2764 break;
2765 
2766 // Calculate the incoming numerator bits.
2767 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2768 
2769 // Add floor(log(C)) bits to the numerator bits.
2770 return std::min(TyBits, NumBits + Denominator->logBase2());
2771 }
2772 break;
2773 }
2774 
2775 case Instruction::SRem: {
2776 const APInt *Denominator;
2777 // srem X, C -> we know that the result is within [-C+1,C) when C is a
2778 // positive constant. This lets us put a lower bound on the number of sign
2779 // bits.
2780 if (match(U->getOperand(1), m_APInt(Denominator))) {
2781 
2782 // Ignore non-positive denominator.
2783 if (!Denominator->isStrictlyPositive())
2784 break;
2785 
2786 // Calculate the incoming numerator bits. SRem by a positive constant
2787 // can't lower the number of sign bits.
2788 unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2789 
2790 // Calculate the leading sign bit constraints by examining the
2791 // denominator. Given that the denominator is positive, there are two
2792 // cases:
2793 //
2794 // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2795 // (1 << ceilLogBase2(C)).
2796 //
2797 // 2. the numerator is negative. Then the result range is (-C,0] and
2798 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2799 //
2800 // Thus a lower bound on the number of sign bits is `TyBits -
2801 // ceilLogBase2(C)`.
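// Illustrative example: for srem i8 %X, 5 the result lies in (-5, 5) and
// ceilLogBase2(5) == 3, so at least 8 - 3 == 5 sign bits are guaranteed
// (e.g. 4 == 0b00000100 has exactly 5 of them).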
2802 2803 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); 2804 return std::max(NumrBits, ResBits); 2805 } 2806 break; 2807 } 2808 2809 case Instruction::AShr: { 2810 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2811 // ashr X, C -> adds C sign bits. Vectors too. 2812 const APInt *ShAmt; 2813 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2814 if (ShAmt->uge(TyBits)) 2815 break; // Bad shift. 2816 unsigned ShAmtLimited = ShAmt->getZExtValue(); 2817 Tmp += ShAmtLimited; 2818 if (Tmp > TyBits) Tmp = TyBits; 2819 } 2820 return Tmp; 2821 } 2822 case Instruction::Shl: { 2823 const APInt *ShAmt; 2824 if (match(U->getOperand(1), m_APInt(ShAmt))) { 2825 // shl destroys sign bits. 2826 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2827 if (ShAmt->uge(TyBits) || // Bad shift. 2828 ShAmt->uge(Tmp)) break; // Shifted all sign bits out. 2829 Tmp2 = ShAmt->getZExtValue(); 2830 return Tmp - Tmp2; 2831 } 2832 break; 2833 } 2834 case Instruction::And: 2835 case Instruction::Or: 2836 case Instruction::Xor: // NOT is handled here. 2837 // Logical binary ops preserve the number of sign bits at the worst. 2838 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2839 if (Tmp != 1) { 2840 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2841 FirstAnswer = std::min(Tmp, Tmp2); 2842 // We computed what we know about the sign bits as our first 2843 // answer. Now proceed to the generic code that uses 2844 // computeKnownBits, and pick whichever answer is better. 2845 } 2846 break; 2847 2848 case Instruction::Select: { 2849 // If we have a clamp pattern, we know that the number of sign bits will 2850 // be the minimum of the clamp min/max range. 2851 const Value *X; 2852 const APInt *CLow, *CHigh; 2853 if (isSignedMinMaxClamp(U, X, CLow, CHigh)) 2854 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); 2855 2856 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2857 if (Tmp == 1) break; 2858 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 2859 return std::min(Tmp, Tmp2); 2860 } 2861 2862 case Instruction::Add: 2863 // Add can have at most one carry bit. Thus we know that the output 2864 // is, at worst, one more bit than the inputs. 2865 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2866 if (Tmp == 1) break; 2867 2868 // Special case decrementing a value (ADD X, -1): 2869 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 2870 if (CRHS->isAllOnesValue()) { 2871 KnownBits Known(TyBits); 2872 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 2873 2874 // If the input is known to be 0 or 1, the output is 0/-1, which is 2875 // all sign bits set. 2876 if ((Known.Zero | 1).isAllOnesValue()) 2877 return TyBits; 2878 2879 // If we are subtracting one from a positive number, there is no carry 2880 // out of the result. 2881 if (Known.isNonNegative()) 2882 return Tmp; 2883 } 2884 2885 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2886 if (Tmp2 == 1) break; 2887 return std::min(Tmp, Tmp2) - 1; 2888 2889 case Instruction::Sub: 2890 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2891 if (Tmp2 == 1) break; 2892 2893 // Handle NEG. 2894 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 2895 if (CLHS->isNullValue()) { 2896 KnownBits Known(TyBits); 2897 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 2898 // If the input is known to be 0 or 1, the output is 0/-1, which is 2899 // all sign bits set. 
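// ((Known.Zero | 1) being all-ones means every bit above bit 0 is known
// zero, i.e. the operand is provably 0 or 1.)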
2900 if ((Known.Zero | 1).isAllOnesValue()) 2901 return TyBits; 2902 2903 // If the input is known to be positive (the sign bit is known clear), 2904 // the output of the NEG has the same number of sign bits as the 2905 // input. 2906 if (Known.isNonNegative()) 2907 return Tmp2; 2908 2909 // Otherwise, we treat this like a SUB. 2910 } 2911 2912 // Sub can have at most one carry bit. Thus we know that the output 2913 // is, at worst, one more bit than the inputs. 2914 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2915 if (Tmp == 1) break; 2916 return std::min(Tmp, Tmp2) - 1; 2917 2918 case Instruction::Mul: { 2919 // The output of the Mul can be at most twice the valid bits in the 2920 // inputs. 2921 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2922 if (SignBitsOp0 == 1) break; 2923 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 2924 if (SignBitsOp1 == 1) break; 2925 unsigned OutValidBits = 2926 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); 2927 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; 2928 } 2929 2930 case Instruction::PHI: { 2931 const PHINode *PN = cast<PHINode>(U); 2932 unsigned NumIncomingValues = PN->getNumIncomingValues(); 2933 // Don't analyze large in-degree PHIs. 2934 if (NumIncomingValues > 4) break; 2935 // Unreachable blocks may have zero-operand PHI nodes. 2936 if (NumIncomingValues == 0) break; 2937 2938 // Take the minimum of all incoming values. This can't infinitely loop 2939 // because of our depth threshold. 2940 Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q); 2941 for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) { 2942 if (Tmp == 1) return Tmp; 2943 Tmp = std::min( 2944 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q)); 2945 } 2946 return Tmp; 2947 } 2948 2949 case Instruction::Trunc: 2950 // FIXME: it's tricky to do anything useful for this, but it is an 2951 // important case for targets like X86. 2952 break; 2953 2954 case Instruction::ExtractElement: 2955 // Look through extract element. At the moment we keep this simple and 2956 // skip tracking the specific element. But at least we might find 2957 // information valid for all elements of the vector (for example if vector 2958 // is sign extended, shifted, etc). 2959 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 2960 2961 case Instruction::ShuffleVector: { 2962 // Collect the minimum number of sign bits that are shared by every vector 2963 // element referenced by the shuffle. 2964 auto *Shuf = dyn_cast<ShuffleVectorInst>(U); 2965 if (!Shuf) { 2966 // FIXME: Add support for shufflevector constant expressions. 2967 return 1; 2968 } 2969 APInt DemandedLHS, DemandedRHS; 2970 // For undef elements, we don't know anything about the common state of 2971 // the shuffle result. 2972 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) 2973 return 1; 2974 Tmp = std::numeric_limits<unsigned>::max(); 2975 if (!!DemandedLHS) { 2976 const Value *LHS = Shuf->getOperand(0); 2977 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q); 2978 } 2979 // If we don't know anything, early out and try computeKnownBits 2980 // fall-back. 2981 if (Tmp == 1) 2982 break; 2983 if (!!DemandedRHS) { 2984 const Value *RHS = Shuf->getOperand(1); 2985 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q); 2986 Tmp = std::min(Tmp, Tmp2); 2987 } 2988 // If we don't know anything, early out and try computeKnownBits 2989 // fall-back. 
2990 if (Tmp == 1)
2991 break;
2992 assert(Tmp <= Ty->getScalarSizeInBits() &&
2993 "Failed to determine minimum sign bits");
2994 return Tmp;
2995 }
2996 }
2997 }
2998 
2999 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3000 // use this information.
3001 
3002 // If we can examine all elements of a vector constant successfully, we're
3003 // done (we can't do any better than that). If not, keep trying.
3004 if (unsigned VecSignBits =
3005 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3006 return VecSignBits;
3007 
3008 KnownBits Known(TyBits);
3009 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3010 
3011 // If we know that the sign bit is either zero or one, determine the number of
3012 // identical bits in the top of the input value.
3013 return std::max(FirstAnswer, Known.countMinSignBits());
3014 }
3015 
3016 /// This function computes the integer multiple of Base that equals V. If
3017 /// successful, it returns true and writes the multiple to Multiple;
3018 /// otherwise it returns false. It looks through SExt instructions only if
3019 /// LookThroughSExt is true.
3020 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
3021 bool LookThroughSExt, unsigned Depth) {
3022 assert(V && "No Value?");
3023 assert(Depth <= MaxDepth && "Limit Search Depth");
3024 assert(V->getType()->isIntegerTy() && "Not integer type!");
3025 
3026 Type *T = V->getType();
3027 
3028 ConstantInt *CI = dyn_cast<ConstantInt>(V);
3029 
3030 if (Base == 0)
3031 return false;
3032 
3033 if (Base == 1) {
3034 Multiple = V;
3035 return true;
3036 }
3037 
3038 ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
3039 Constant *BaseVal = ConstantInt::get(T, Base);
3040 if (CO && CO == BaseVal) {
3041 // Multiple is 1.
3042 Multiple = ConstantInt::get(T, 1);
3043 return true;
3044 }
3045 
3046 if (CI && CI->getZExtValue() % Base == 0) {
3047 Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
3048 return true;
3049 }
3050 
3051 if (Depth == MaxDepth) return false; // Limit search depth.
  Operator *I = dyn_cast<Operator>(V);
  if (!I) return false;

  switch (I->getOpcode()) {
  default: break;
  case Instruction::SExt:
    if (!LookThroughSExt) return false;
    // otherwise fall through to ZExt
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
    return ComputeMultiple(I->getOperand(0), Base, Multiple,
                           LookThroughSExt, Depth+1);
  case Instruction::Shl:
  case Instruction::Mul: {
    Value *Op0 = I->getOperand(0);
    Value *Op1 = I->getOperand(1);

    if (I->getOpcode() == Instruction::Shl) {
      ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
      if (!Op1CI) return false;
      // Turn Op0 << Op1 into Op0 * 2^Op1
      APInt Op1Int = Op1CI->getValue();
      uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
      APInt API(Op1Int.getBitWidth(), 0);
      API.setBit(BitToSet);
      Op1 = ConstantInt::get(V->getContext(), API);
    }

    Value *Mul0 = nullptr;
    if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
      if (Constant *Op1C = dyn_cast<Constant>(Op1))
        if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
          if (Op1C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
          if (Op1C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op1C->getType());

          // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
          Multiple = ConstantExpr::getMul(MulC, Op1C);
          return true;
        }

      if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
        if (Mul0CI->getValue() == 1) {
          // V == Base * Op1, so return Op1
          Multiple = Op1;
          return true;
        }
    }

    Value *Mul1 = nullptr;
    if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
      if (Constant *Op0C = dyn_cast<Constant>(Op0))
        if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
          if (Op0C->getType()->getPrimitiveSizeInBits() <
              MulC->getType()->getPrimitiveSizeInBits())
            Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
          if (Op0C->getType()->getPrimitiveSizeInBits() >
              MulC->getType()->getPrimitiveSizeInBits())
            MulC = ConstantExpr::getZExt(MulC, Op0C->getType());

          // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
          Multiple = ConstantExpr::getMul(MulC, Op0C);
          return true;
        }

      if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
        if (Mul1CI->getValue() == 1) {
          // V == Base * Op0, so return Op0
          Multiple = Op0;
          return true;
        }
    }
  }
  }

  // We could not determine if V is a multiple of Base.
  return false;
}

Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
                                            const TargetLibraryInfo *TLI) {
  const Function *F = CB.getCalledFunction();
  if (!F)
    return Intrinsic::not_intrinsic;

  if (F->isIntrinsic())
    return F->getIntrinsicID();

  if (!TLI)
    return Intrinsic::not_intrinsic;

  LibFunc Func;
  // We're going to make assumptions about the semantics of the functions;
  // check that the target knows the function is available in this
  // environment and that it does not have local linkage.
3151 if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func)) 3152 return Intrinsic::not_intrinsic; 3153 3154 if (!CB.onlyReadsMemory()) 3155 return Intrinsic::not_intrinsic; 3156 3157 // Otherwise check if we have a call to a function that can be turned into a 3158 // vector intrinsic. 3159 switch (Func) { 3160 default: 3161 break; 3162 case LibFunc_sin: 3163 case LibFunc_sinf: 3164 case LibFunc_sinl: 3165 return Intrinsic::sin; 3166 case LibFunc_cos: 3167 case LibFunc_cosf: 3168 case LibFunc_cosl: 3169 return Intrinsic::cos; 3170 case LibFunc_exp: 3171 case LibFunc_expf: 3172 case LibFunc_expl: 3173 return Intrinsic::exp; 3174 case LibFunc_exp2: 3175 case LibFunc_exp2f: 3176 case LibFunc_exp2l: 3177 return Intrinsic::exp2; 3178 case LibFunc_log: 3179 case LibFunc_logf: 3180 case LibFunc_logl: 3181 return Intrinsic::log; 3182 case LibFunc_log10: 3183 case LibFunc_log10f: 3184 case LibFunc_log10l: 3185 return Intrinsic::log10; 3186 case LibFunc_log2: 3187 case LibFunc_log2f: 3188 case LibFunc_log2l: 3189 return Intrinsic::log2; 3190 case LibFunc_fabs: 3191 case LibFunc_fabsf: 3192 case LibFunc_fabsl: 3193 return Intrinsic::fabs; 3194 case LibFunc_fmin: 3195 case LibFunc_fminf: 3196 case LibFunc_fminl: 3197 return Intrinsic::minnum; 3198 case LibFunc_fmax: 3199 case LibFunc_fmaxf: 3200 case LibFunc_fmaxl: 3201 return Intrinsic::maxnum; 3202 case LibFunc_copysign: 3203 case LibFunc_copysignf: 3204 case LibFunc_copysignl: 3205 return Intrinsic::copysign; 3206 case LibFunc_floor: 3207 case LibFunc_floorf: 3208 case LibFunc_floorl: 3209 return Intrinsic::floor; 3210 case LibFunc_ceil: 3211 case LibFunc_ceilf: 3212 case LibFunc_ceill: 3213 return Intrinsic::ceil; 3214 case LibFunc_trunc: 3215 case LibFunc_truncf: 3216 case LibFunc_truncl: 3217 return Intrinsic::trunc; 3218 case LibFunc_rint: 3219 case LibFunc_rintf: 3220 case LibFunc_rintl: 3221 return Intrinsic::rint; 3222 case LibFunc_nearbyint: 3223 case LibFunc_nearbyintf: 3224 case LibFunc_nearbyintl: 3225 return Intrinsic::nearbyint; 3226 case LibFunc_round: 3227 case LibFunc_roundf: 3228 case LibFunc_roundl: 3229 return Intrinsic::round; 3230 case LibFunc_roundeven: 3231 case LibFunc_roundevenf: 3232 case LibFunc_roundevenl: 3233 return Intrinsic::roundeven; 3234 case LibFunc_pow: 3235 case LibFunc_powf: 3236 case LibFunc_powl: 3237 return Intrinsic::pow; 3238 case LibFunc_sqrt: 3239 case LibFunc_sqrtf: 3240 case LibFunc_sqrtl: 3241 return Intrinsic::sqrt; 3242 } 3243 3244 return Intrinsic::not_intrinsic; 3245 } 3246 3247 /// Return true if we can prove that the specified FP value is never equal to 3248 /// -0.0. 3249 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee 3250 /// that a value is not -0.0. It only guarantees that -0.0 may be treated 3251 /// the same as +0.0 in floating-point ops. 3252 /// 3253 /// NOTE: this function will need to be revisited when we support non-default 3254 /// rounding modes! 3255 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, 3256 unsigned Depth) { 3257 if (auto *CFP = dyn_cast<ConstantFP>(V)) 3258 return !CFP->getValueAPF().isNegZero(); 3259 3260 // Limit search depth. 3261 if (Depth == MaxDepth) 3262 return false; 3263 3264 auto *Op = dyn_cast<Operator>(V); 3265 if (!Op) 3266 return false; 3267 3268 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. 3269 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP()))) 3270 return true; 3271 3272 // sitofp and uitofp turn into +0.0 for zero. 
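  // (They can never produce -0.0 for any input: the only zero result comes
  // from a zero input, which converts to +0.0.)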
  if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
    return true;

  if (auto *Call = dyn_cast<CallInst>(Op)) {
    Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
    switch (IID) {
    default:
      break;
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    case Intrinsic::sqrt:
    case Intrinsic::canonicalize:
      return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
    // fabs(x) != -0.0
    case Intrinsic::fabs:
      return true;
    }
  }

  return false;
}

/// If \p SignBitOnly is true, test for a known-zero sign bit rather than a
/// standard ordered compare. For example, -0.0 is then treated as less than
/// 0.0 because of its sign bit, even though the two compare equal.
static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
                                            const TargetLibraryInfo *TLI,
                                            bool SignBitOnly,
                                            unsigned Depth) {
  // TODO: This function does not do the right thing when SignBitOnly is true
  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
  // which flips the sign bits of NaNs.  See
  // https://llvm.org/bugs/show_bug.cgi?id=31702.

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    return !CFP->getValueAPF().isNegative() ||
           (!SignBitOnly && CFP->getValueAPF().isZero());
  }

  // Handle vector of constants.
  if (auto *CV = dyn_cast<Constant>(V)) {
    if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
      unsigned NumElts = CVFVTy->getNumElements();
      for (unsigned i = 0; i != NumElts; ++i) {
        auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
        if (!CFP)
          return false;
        if (CFP->getValueAPF().isNegative() &&
            (SignBitOnly || !CFP->getValueAPF().isZero()))
          return false;
      }

      // All non-negative ConstantFPs.
      return true;
    }
  }

  if (Depth == MaxDepth)
    return false; // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (!I)
    return false;

  switch (I->getOpcode()) {
  default:
    break;
  // Unsigned integers are always nonnegative.
  case Instruction::UIToFP:
    return true;
  case Instruction::FMul:
  case Instruction::FDiv:
    // X * X is always non-negative or a NaN.
    // X / X is always exactly 1.0 or a NaN.
    if (I->getOperand(0) == I->getOperand(1) &&
        (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
      return true;

    LLVM_FALLTHROUGH;
  case Instruction::FAdd:
  case Instruction::FRem:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::Select:
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                           Depth + 1) &&
           cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::FPExt:
  case Instruction::FPTrunc:
    // Widening/narrowing never changes the sign.
    return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                           Depth + 1);
  case Instruction::ExtractElement:
    // Look through extract element. At the moment we keep this simple and
    // skip tracking the specific element. But at least we might find
    // information valid for all elements of the vector.
3371 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3372 Depth + 1); 3373 case Instruction::Call: 3374 const auto *CI = cast<CallInst>(I); 3375 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI); 3376 switch (IID) { 3377 default: 3378 break; 3379 case Intrinsic::maxnum: 3380 return (isKnownNeverNaN(I->getOperand(0), TLI) && 3381 cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, 3382 SignBitOnly, Depth + 1)) || 3383 (isKnownNeverNaN(I->getOperand(1), TLI) && 3384 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, 3385 SignBitOnly, Depth + 1)); 3386 3387 case Intrinsic::maximum: 3388 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3389 Depth + 1) || 3390 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3391 Depth + 1); 3392 case Intrinsic::minnum: 3393 case Intrinsic::minimum: 3394 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3395 Depth + 1) && 3396 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3397 Depth + 1); 3398 case Intrinsic::exp: 3399 case Intrinsic::exp2: 3400 case Intrinsic::fabs: 3401 return true; 3402 3403 case Intrinsic::sqrt: 3404 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 3405 if (!SignBitOnly) 3406 return true; 3407 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 3408 CannotBeNegativeZero(CI->getOperand(0), TLI)); 3409 3410 case Intrinsic::powi: 3411 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 3412 // powi(x,n) is non-negative if n is even. 3413 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 3414 return true; 3415 } 3416 // TODO: This is not correct. Given that exp is an integer, here are the 3417 // ways that pow can return a negative value: 3418 // 3419 // pow(x, exp) --> negative if exp is odd and x is negative. 3420 // pow(-0, exp) --> -inf if exp is negative odd. 3421 // pow(-0, exp) --> -0 if exp is positive odd. 3422 // pow(-inf, exp) --> -0 if exp is negative odd. 3423 // pow(-inf, exp) --> -inf if exp is positive odd. 3424 // 3425 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 3426 // but we must return false if x == -0. Unfortunately we do not currently 3427 // have a way of expressing this constraint. See details in 3428 // https://llvm.org/bugs/show_bug.cgi?id=31702. 3429 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3430 Depth + 1); 3431 3432 case Intrinsic::fma: 3433 case Intrinsic::fmuladd: 3434 // x*x+y is non-negative if y is non-negative. 3435 return I->getOperand(0) == I->getOperand(1) && 3436 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && 3437 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 3438 Depth + 1); 3439 } 3440 break; 3441 } 3442 return false; 3443 } 3444 3445 bool llvm::CannotBeOrderedLessThanZero(const Value *V, 3446 const TargetLibraryInfo *TLI) { 3447 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); 3448 } 3449 3450 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { 3451 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); 3452 } 3453 3454 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI, 3455 unsigned Depth) { 3456 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type"); 3457 3458 // If we're told that infinities won't happen, assume they won't. 
  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
    if (FPMathOp->hasNoInfs())
      return true;

  // Handle scalar constants.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->isInfinity();

  if (Depth == MaxDepth)
    return false;

  if (auto *Inst = dyn_cast<Instruction>(V)) {
    switch (Inst->getOpcode()) {
    case Instruction::Select: {
      return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
             isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
    }
    case Instruction::UIToFP:
      // If the input type fits into the floating-point type, the result is
      // finite.
      return ilogb(APFloat::getLargest(
                 Inst->getType()->getScalarType()->getFltSemantics())) >=
             (int)Inst->getOperand(0)->getType()->getScalarSizeInBits();
    default:
      break;
    }
  }

  // Try to handle fixed-width vector constants.
  if (isa<FixedVectorType>(V->getType()) && isa<Constant>(V)) {
    // For vectors, verify that each element is not infinity.
    unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
      if (!Elt)
        return false;
      if (isa<UndefValue>(Elt))
        continue;
      auto *CElt = dyn_cast<ConstantFP>(Elt);
      if (!CElt || CElt->isInfinity())
        return false;
    }
    // All elements were confirmed non-infinity or undefined.
    return true;
  }

  // We were not able to prove that V never contains infinity.
  return false;
}

bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
                           unsigned Depth) {
  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");

  // If we're told that NaNs won't happen, assume they won't.
  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
    if (FPMathOp->hasNoNaNs())
      return true;

  // Handle scalar constants.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->isNaN();

  if (Depth == MaxDepth)
    return false;

  if (auto *Inst = dyn_cast<Instruction>(V)) {
    switch (Inst->getOpcode()) {
    case Instruction::FAdd:
    case Instruction::FSub:
      // Adding positive and negative infinity produces NaN.
      return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
             isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
             (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));

    case Instruction::FMul:
      // Zero multiplied with infinity produces NaN.
      // FIXME: If neither side can be zero, fmul never produces NaN.
      return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
             isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
             isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
             isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);

    case Instruction::FDiv:
    case Instruction::FRem:
      // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
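      // For example, fdiv 1.0, 0.0 is +Inf rather than NaN, but for now we
      // conservatively give up on all fdiv/frem.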
      return false;

    case Instruction::Select: {
      return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
             isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
    }
    case Instruction::SIToFP:
    case Instruction::UIToFP:
      return true;
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
    default:
      break;
    }
  }

  if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::canonicalize:
    case Intrinsic::fabs:
    case Intrinsic::copysign:
    case Intrinsic::exp:
    case Intrinsic::exp2:
    case Intrinsic::floor:
    case Intrinsic::ceil:
    case Intrinsic::trunc:
    case Intrinsic::rint:
    case Intrinsic::nearbyint:
    case Intrinsic::round:
    case Intrinsic::roundeven:
      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
    case Intrinsic::sqrt:
      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
             CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
    case Intrinsic::minnum:
    case Intrinsic::maxnum:
      // If either operand is not NaN, the result is not NaN.
      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
             isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
    default:
      return false;
    }
  }

  // Try to handle fixed-width vector constants.
  if (isa<FixedVectorType>(V->getType()) && isa<Constant>(V)) {
    // For vectors, verify that each element is not NaN.
    unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
      if (!Elt)
        return false;
      if (isa<UndefValue>(Elt))
        continue;
      auto *CElt = dyn_cast<ConstantFP>(Elt);
      if (!CElt || CElt->isNaN())
        return false;
    }
    // All elements were confirmed not-NaN or undefined.
    return true;
  }

  // We were not able to prove that V never contains NaN.
  return false;
}

Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8))
    return V;

  LLVMContext &Ctx = V->getContext();

  // Undef: don't care which byte, so return an i8 undef.
  auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
  if (isa<UndefValue>(V))
    return UndefInt8;

  // Return undef for zero-sized types.
  if (!DL.getTypeStoreSize(V->getType()).isNonZero())
    return UndefInt8;

  Constant *C = dyn_cast<Constant>(V);
  if (!C) {
    // Conceptually, we could handle things like:
    //   %a = zext i8 %X to i16
    //   %b = shl i16 %a, 8
    //   %c = or i16 %a, %b
    // but until there is an example that actually needs this, it doesn't seem
    // worth worrying about.
    return nullptr;
  }

  // Handle 'null', ConstantAggregateZero, etc.
  if (C->isNullValue())
    return Constant::getNullValue(Type::getInt8Ty(Ctx));

  // Constant floating-point values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
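  // For example, double 0.0 bitcasts to i64 0, which splats to the zero
  // byte, while double 1.0 bitcasts to i64 0x3FF0000000000000, whose bytes
  // differ and therefore do not splat.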
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    Type *Ty = nullptr;
    if (CFP->getType()->isHalfTy())
      Ty = Type::getInt16Ty(Ctx);
    else if (CFP->getType()->isFloatTy())
      Ty = Type::getInt32Ty(Ctx);
    else if (CFP->getType()->isDoubleTy())
      Ty = Type::getInt64Ty(Ctx);
    // Don't handle long double formats, which have strange constraints.
    return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
              : nullptr;
  }

  // We can handle constant integers that are a multiple of 8 bits wide.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() % 8 == 0) {
      assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
      if (!CI->getValue().isSplat(8))
        return nullptr;
      return ConstantInt::get(Ctx, CI->getValue().trunc(8));
    }
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      auto PS = DL.getPointerSizeInBits(
          cast<PointerType>(CE->getType())->getAddressSpace());
      return isBytewiseValue(
          ConstantExpr::getIntegerCast(CE->getOperand(0),
                                       Type::getIntNTy(Ctx, PS), false),
          DL);
    }
  }

  auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
    if (LHS == RHS)
      return LHS;
    if (!LHS || !RHS)
      return nullptr;
    if (LHS == UndefInt8)
      return RHS;
    if (RHS == UndefInt8)
      return LHS;
    return nullptr;
  };

  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
    Value *Val = UndefInt8;
    for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
      if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
        return nullptr;
    return Val;
  }

  if (isa<ConstantAggregate>(C)) {
    Value *Val = UndefInt8;
    for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
      if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
        return nullptr;
    return Val;
  }

  // Don't try to handle the handful of other constants.
  return nullptr;
}

// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the list of indices within the nested struct From that we
// are looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far; new insertvalue instructions
// build on that.
static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
                                SmallVectorImpl<unsigned> &Idxs,
                                unsigned IdxSkip,
                                Instruction *InsertBefore) {
  StructType *STy = dyn_cast<StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it.
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively.
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Clean up.
        while (PrevTo != OrigTo) {
          InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements.
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates, we're
    // done.
    if (To)
      return To;
  }
  // Base case, the type indexed by Idxs is not a struct, or not all of the
  // struct's elements had a value that was inserted directly. In the latter
  // case, perhaps we can't determine each of the subelements individually,
  // but we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot.
  Value *V = FindInsertedValue(From, Idxs);

  if (!V)
    return nullptr;

  // Insert the value in the new (sub) aggregate.
  return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
                                 "tmp", InsertBefore);
}

// This helper takes a nested struct and extracts a part of it (which is again
// a struct) into a new value. For example, given the struct:
//   { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
//   { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct are known (i.e., inserted into From by
// an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore.
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                       idx_range);
  Value *To = UndefValue::get(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
}

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it was inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_range.empty())
    return V;
  // We have indices, so V should have an indexable type.
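  // (For example, a {i32, {i32, i32}} struct or a [4 x i8] array can be
  // indexed into; a scalar i32 cannot.)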
  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
         "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
         "Invalid indices for type?");

  if (Constant *C = dyn_cast<Constant>(V)) {
    C = C->getAggregateElement(idx_range[0]);
    if (!C) return nullptr;
    return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
  }

  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop over the indices of the insertvalue instruction in parallel with
    // the requested indices.
    const unsigned *req_idx = idx_range.begin();
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_range.end()) {
        // We can't handle this without inserting insertvalues.
        if (!InsertBefore)
          return nullptr;

        // The requested index identifies a part of a nested aggregate. Handle
        // this specially. For example,
        //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
        //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
        //   %C = extractvalue {i32, { i32, i32 } } %B, 1
        // This can be changed into
        //   %A = insertvalue {i32, i32 } undef, i32 10, 0
        //   %C = insertvalue {i32, i32 } %A, i32 11, 1
        // which allows the unused 0,0 element from the nested struct to be
        // removed.
        return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
                                 InsertBefore);
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_range,
                                 InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(),
                             makeArrayRef(req_idx, idx_range.end()),
                             InsertBefore);
  }

  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.

    // Calculate the number of indices required.
    unsigned size = I->getNumIndices() + idx_range.size();
    // Allocate some space to put the new indices in.
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extract value instruction.
    Idxs.append(I->idx_begin(), I->idx_end());

    // Add requested indices.
    Idxs.append(idx_range.begin(), idx_range.end());

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
  }
  // Otherwise, we don't know (e.g., extracting from a function return value
  // or a load instruction).
  return nullptr;
}

bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                       unsigned CharSize) {
  // Make sure the GEP has exactly three arguments.
  if (GEP->getNumOperands() != 3)
    return false;

  // Make sure the index-ee is a pointer to an array of \p CharSize integers.
  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
    return false;

  // Check to make sure that the first operand of the GEP is an integer and
  // has value 0 so that we are sure we're indexing into the initializer.
  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!FirstIdx || !FirstIdx->isZero())
    return false;

  return true;
}

bool llvm::getConstantDataArrayInfo(const Value *V,
                                    ConstantDataArraySlice &Slice,
                                    unsigned ElementSize, uint64_t Offset) {
  assert(V);

  // Look through bitcast instructions and geps.
  V = V->stripPointerCasts();

  // If the value is a GEP instruction or constant expression, treat it as an
  // offset.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    // The GEP operator should be based on a pointer to a string constant, and
    // it must be indexing into that string constant.
    if (!isGEPBasedOnPointerToString(GEP, ElementSize))
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
                                    StartIdx + Offset);
  }

  // The GEP, whether an instruction or a constant expression, must reference
  // a global variable that is a constant and is initialized. The referenced
  // constant initializer is the array that we'll use for optimization.
  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
    return false;

  const ConstantDataArray *Array;
  ArrayType *ArrayTy;
  if (GV->getInitializer()->isNullValue()) {
    Type *GVTy = GV->getValueType();
    if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
      // A zeroinitializer for the array; there is no ConstantDataArray.
      Array = nullptr;
    } else {
      const DataLayout &DL = GV->getParent()->getDataLayout();
      uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
      uint64_t Length = SizeInBytes / (ElementSize / 8);
      if (Length <= Offset)
        return false;

      Slice.Array = nullptr;
      Slice.Offset = 0;
      Slice.Length = Length - Offset;
      return true;
    }
  } else {
    // This must be a ConstantDataArray.
    Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
    if (!Array)
      return false;
    ArrayTy = Array->getType();
  }
  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
    return false;

  uint64_t NumElts = ArrayTy->getArrayNumElements();
  if (Offset > NumElts)
    return false;

  Slice.Array = Array;
  Slice.Offset = Offset;
  Slice.Length = NumElts - Offset;
  return true;
}

/// This function extracts the bytes of the constant string pointed to by V.
/// If successful, it returns true and stores the string in Str. If
/// unsuccessful, it returns false.
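/// For example, for a global initialized to c"ab\00cd", Offset 0 and
/// TrimAtNul == true yield Str == "ab", while TrimAtNul == false yields
/// Str == "ab\00cd".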
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 uint64_t Offset, bool TrimAtNul) {
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
    return false;

  if (Slice.Array == nullptr) {
    if (TrimAtNul) {
      Str = StringRef();
      return true;
    }
    if (Slice.Length == 1) {
      Str = StringRef("", 1);
      return true;
    }
    // We cannot instantiate a StringRef as we do not have an appropriate
    // string of 0s at hand.
    return false;
  }

  // Start out with the entire array in the StringRef.
  Str = Slice.Array->getAsString();
  // Skip over 'offset' bytes.
  Str = Str.substr(Slice.Offset);

  if (TrimAtNul) {
    // Trim off the \0 and anything after it. If the array is not nul
    // terminated, we just return the whole remainder of the string. The
    // client may know some other way that the string is length-bound.
    Str = Str.substr(0, Str.find('\0'));
  }
  return true;
}

// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'.  If we can't, return 0.
static uint64_t GetStringLengthH(const Value *V,
                                 SmallPtrSetImpl<const PHINode*> &PHIs,
                                 unsigned CharSize) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

  // If this is a PHI node, there are two cases: either we have already seen it
  // or we haven't.
  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
    if (!PHIs.insert(PN).second)
      return ~0ULL; // already in the set.

    // If it was new, see if all the input strings are the same length.
    uint64_t LenSoFar = ~0ULL;
    for (Value *IncValue : PN->incoming_values()) {
      uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
      if (Len == 0) return 0; // Unknown length -> unknown.

      if (Len == ~0ULL) continue;

      if (Len != LenSoFar && LenSoFar != ~0ULL)
        return 0; // Disagree -> unknown.
      LenSoFar = Len;
    }

    // Success, all agree.
    return LenSoFar;
  }

  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
    uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
    if (Len1 == 0) return 0;
    uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
    if (Len2 == 0) return 0;
    if (Len1 == ~0ULL) return Len2;
    if (Len2 == ~0ULL) return Len1;
    if (Len1 != Len2) return 0;
    return Len1;
  }

  // Otherwise, see if we can read the string.
  ConstantDataArraySlice Slice;
  if (!getConstantDataArrayInfo(V, Slice, CharSize))
    return 0;

  if (Slice.Array == nullptr)
    return 1;

  // Search for nul characters.
  unsigned NullIndex = 0;
  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
    if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
      break;
  }

  return NullIndex + 1;
}

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'.  If we can't, return 0.
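/// For example, a pointer to a global initialized to c"abc\00" yields 4 (a
/// strlen of 3, plus the terminating nul); a pointer with unknown contents
/// yields 0.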
uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
  if (!V->getType()->isPointerTy())
    return 0;

  SmallPtrSet<const PHINode*, 32> PHIs;
  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
  // return an empty string as a length.
  return Len == ~0ULL ? 1 : Len;
}

const Value *
llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                           bool MustPreserveNullness) {
  assert(Call &&
         "getArgumentAliasingToReturnedPointer only works on nonnull calls");
  if (const Value *RV = Call->getReturnedArgOperand())
    return RV;
  // This can be used only as an aliasing property.
  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
          Call, MustPreserveNullness))
    return Call->getArgOperand(0);
  return nullptr;
}

bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness) {
  switch (Call->getIntrinsicID()) {
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::aarch64_irg:
  case Intrinsic::aarch64_tagp:
    return true;
  case Intrinsic::ptrmask:
    return !MustPreserveNullness;
  default:
    return false;
  }
}

/// \p PN defines a loop-variant pointer to an object.  Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
                                         const LoopInfo *LI) {
  // Find the loop-defined value.
  Loop *L = LI->getLoopFor(PN->getParent());
  if (PN->getNumIncomingValues() != 2)
    return true;

  // Find the value from previous iteration.
  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
    return true;

  // If a new pointer is loaded in the loop, the pointer references a
  // different object in every iteration.  E.g.:
  //   for (i)
  //     int *p = a[i];
  //     ...
  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
    if (!L->isLoopInvariant(Load->getPointerOperand()))
      return false;
  return true;
}

Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
                                 unsigned MaxLookup) {
  if (!V->getType()->isPointerTy())
    return V;
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
      if (!V->getType()->isPointerTy())
        return V;
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        return V;
      V = GA->getAliasee();
    } else {
      if (auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
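        // For example, %p.lcssa = phi i8* [ %p, %loop ] simply forwards %p.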
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking knows about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed
        // with attributes, but that nevertheless return a pointer aliasing
        // their argument. Because some analyses may assume that a nocapture
        // pointer is never returned from a special intrinsic (the function
        // would otherwise have to be marked with the returned attribute), it
        // is crucial to use this function, which is kept in sync with
        // CaptureTracking. Not using it may cause weird miscompilations
        // where two aliasing pointers are assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  }
  return V;
}

void llvm::GetUnderlyingObjects(const Value *V,
                                SmallVectorImpl<const Value *> &Objects,
                                const DataLayout &DL, LoopInfo *LI,
                                unsigned MaxLookup) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObject(P, DL, MaxLookup);

    if (!Visited.insert(P).second)
      continue;

    if (auto *SI = dyn_cast<SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (auto *PN = dyn_cast<PHINode>(P)) {
      // If this PHI changes the underlying object in every iteration of the
      // loop, don't look through it.  Consider:
      //   int **A;
      //   for (i) {
      //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
      //     Curr = A[i];
      //     *Prev, *Curr;
      //
      // Prev is tracking Curr one iteration behind so they refer to different
      // underlying objects.
      if (!LI || !LI->isLoopHeader(PN->getParent()) ||
          isSameUnderlyingObjectInLoop(PN, LI))
        for (Value *IncValue : PN->incoming_values())
          Worklist.push_back(IncValue);
      continue;
    }

    Objects.push_back(P);
  } while (!Worklist.empty());
}

/// This is the function that does the work of looking through basic
/// ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
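      // For example, starting from %a in
      //   %i = ptrtoint i8* %p to i64
      //   %a = add i64 %i, 16
      // we step through the add to %i, and the ptrtoint then hands back %p.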
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (true);
}

/// This is a wrapper around GetUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
/// It returns false if an unidentified object is found in
/// GetUnderlyingObjects.
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
                                          SmallVectorImpl<Value *> &Objects,
                                          const DataLayout &DL) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<const Value *, 4> Objs;
    GetUnderlyingObjects(V, Objs, DL);

    for (const Value *V : Objs) {
      if (!Visited.insert(V).second)
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
            getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      // If GetUnderlyingObjects fails to find an identifiable object,
      // getUnderlyingObjectsForCodeGen also fails for safety.
      if (!isIdentifiedObject(V)) {
        Objects.clear();
        return false;
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
  return true;
}

/// Return true if the only users of this pointer are lifetime markers.
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  for (const User *U : V->users()) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II) return false;

    if (!II->isLifetimeStartOrEnd())
      return false;
  }
  return true;
}

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  if (!LI.isUnordered())
    return true;
  const Function &F = *LI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                        const Instruction *CtxI,
                                        const DominatorTree *DT) {
  const Operator *Inst = dyn_cast<Operator>(V);
  if (!Inst)
    return false;

  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
    if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
      if (C->canTrap())
        return false;

  switch (Inst->getOpcode()) {
  default:
    return true;
  case Instruction::UDiv:
  case Instruction::URem: {
    // x / y is undefined if y == 0.
    const APInt *V;
    if (match(Inst->getOperand(1), m_APInt(V)))
      return *V != 0;
    return false;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is not 0 or -1.
    if (*Denominator != -1)
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist
    // as long as we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    // The numerator *might* be MinSignedValue.
    return false;
  }
  case Instruction::Load: {
    const LoadInst *LI = cast<LoadInst>(Inst);
    if (mustSuppressSpeculation(*LI))
      return false;
    const DataLayout &DL = LI->getModule()->getDataLayout();
    return isDereferenceableAndAlignedPointer(
        LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
        DL, CtxI, DT);
  }
  case Instruction::Call: {
    auto *CI = cast<const CallInst>(Inst);
    const Function *Callee = CI->getCalledFunction();

    // The called function could have undefined behavior or side-effects, even
    // if marked readnone nounwind.
    return Callee && Callee->isSpeculatable();
  }
  case Instruction::VAArg:
  case Instruction::Alloca:
  case Instruction::Invoke:
  case Instruction::CallBr:
  case Instruction::PHI:
  case Instruction::Store:
  case Instruction::Ret:
  case Instruction::Br:
  case Instruction::IndirectBr:
  case Instruction::Switch:
  case Instruction::Unreachable:
  case Instruction::Fence:
  case Instruction::AtomicRMW:
  case Instruction::AtomicCmpXchg:
  case Instruction::LandingPad:
  case Instruction::Resume:
  case Instruction::CatchSwitch:
  case Instruction::CatchPad:
  case Instruction::CatchRet:
  case Instruction::CleanupPad:
  case Instruction::CleanupRet:
    return false; // Misc instructions which have effects
  }
}

bool llvm::mayBeMemoryDependent(const Instruction &I) {
  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}

/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
  switch (OR) {
  case ConstantRange::OverflowResult::MayOverflow:
    return OverflowResult::MayOverflow;
  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
    return OverflowResult::AlwaysOverflowsLow;
  case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
    return OverflowResult::AlwaysOverflowsHigh;
  case ConstantRange::OverflowResult::NeverOverflows:
    return OverflowResult::NeverOverflows;
  }
  llvm_unreachable("Unknown OverflowResult");
}

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
static ConstantRange computeConstantRangeIncludingKnownBits(
    const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
  KnownBits Known = computeKnownBits(
      V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
  ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
  ConstantRange::PreferredRangeType RangeType =
      ForSigned ?
ConstantRange::Signed : ConstantRange::Unsigned; 4436 return CR1.intersectWith(CR2, RangeType); 4437 } 4438 4439 OverflowResult llvm::computeOverflowForUnsignedMul( 4440 const Value *LHS, const Value *RHS, const DataLayout &DL, 4441 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, 4442 bool UseInstrInfo) { 4443 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT, 4444 nullptr, UseInstrInfo); 4445 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT, 4446 nullptr, UseInstrInfo); 4447 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false); 4448 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false); 4449 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange)); 4450 } 4451 4452 OverflowResult 4453 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS, 4454 const DataLayout &DL, AssumptionCache *AC, 4455 const Instruction *CxtI, 4456 const DominatorTree *DT, bool UseInstrInfo) { 4457 // Multiplying n * m significant bits yields a result of n + m significant 4458 // bits. If the total number of significant bits does not exceed the 4459 // result bit width (minus 1), there is no overflow. 4460 // This means if we have enough leading sign bits in the operands 4461 // we can guarantee that the result does not overflow. 4462 // Ref: "Hacker's Delight" by Henry Warren 4463 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); 4464 4465 // Note that underestimating the number of sign bits gives a more 4466 // conservative answer. 4467 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) + 4468 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT); 4469 4470 // First handle the easy case: if we have enough sign bits there's 4471 // definitely no overflow. 4472 if (SignBits > BitWidth + 1) 4473 return OverflowResult::NeverOverflows; 4474 4475 // There are two ambiguous cases where there can be no overflow: 4476 // SignBits == BitWidth + 1 and 4477 // SignBits == BitWidth 4478 // The second case is difficult to check, therefore we only handle the 4479 // first case. 4480 if (SignBits == BitWidth + 1) { 4481 // It overflows only when both arguments are negative and the true 4482 // product is exactly the minimum negative number. 4483 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 4484 // For simplicity we just check if at least one side is not negative. 
    KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          nullptr, UseInstrInfo);
    KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
                                          nullptr, UseInstrInfo);
    if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
      return OverflowResult::NeverOverflows;
  }
  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedAdd(
    const Value *LHS, const Value *RHS, const DataLayout &DL,
    AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
    bool UseInstrInfo) {
  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
      nullptr, UseInstrInfo);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
      nullptr, UseInstrInfo);
  return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
}

static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
                                                  const Value *RHS,
                                                  const AddOperator *Add,
                                                  const DataLayout &DL,
                                                  AssumptionCache *AC,
                                                  const Instruction *CxtI,
                                                  const DominatorTree *DT) {
  if (Add && Add->hasNoSignedWrap()) {
    return OverflowResult::NeverOverflows;
  }

  // If LHS and RHS each have at least two sign bits, the addition will look
  // like
  //
  //   XX..... +
  //   YY.....
  //
  // If the carry into the most significant position is 0, X and Y can't both
  // be 1 and therefore the carry out of the addition is also 0.
  //
  // If the carry into the most significant position is 1, X and Y can't both
  // be 0 and therefore the carry out of the addition is also 1.
  //
  // Since the carry into the most significant position is always equal to
  // the carry out of the addition, there is no signed overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  OverflowResult OR =
      mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
  if (OR != OverflowResult::MayOverflow)
    return OR;

  // The remaining code needs Add to be available. Return early if it is not.
  if (!Add)
    return OverflowResult::MayOverflow;

  // If the sign of Add is the same as at least one of the operands, this add
  // CANNOT overflow. If this can be determined from the known bits of the
  // operands the above signedAddMayOverflow() check will have already done so.
  // The only other way to improve on the known bits is from an assumption, so
  // call computeKnownBitsFromAssume() directly.
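  // For example, if LHS is known non-negative and an assume tells us the sum
  // is also non-negative, the add cannot have wrapped: overflowing past the
  // signed maximum would have produced a negative result.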
  bool LHSOrRHSKnownNonNegative =
      (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
  bool LHSOrRHSKnownNegative =
      (LHSRange.isAllNegative() || RHSRange.isAllNegative());
  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
    KnownBits AddKnown(LHSRange.getBitWidth());
    computeKnownBitsFromAssume(
        Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
    if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
        (AddKnown.isNegative() && LHSOrRHSKnownNegative))
      return OverflowResult::NeverOverflows;
  }

  return OverflowResult::MayOverflow;
}

OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
                                                   const Value *RHS,
                                                   const DataLayout &DL,
                                                   AssumptionCache *AC,
                                                   const Instruction *CxtI,
                                                   const DominatorTree *DT) {
  // Checking for conditions implied by dominating conditions may be expensive.
  // Limit it to usub_with_overflow calls for now.
  if (match(CxtI,
            m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
    if (auto C =
            isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
      if (*C)
        return OverflowResult::NeverOverflows;
      return OverflowResult::AlwaysOverflowsLow;
    }
  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
  return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
}

OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  // If LHS and RHS each have at least two sign bits, the subtraction
  // cannot overflow.
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
    return OverflowResult::NeverOverflows;

  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
      LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
      RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
  return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
}

bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                     const DominatorTree &DT) {
  SmallVector<const BranchInst *, 2> GuardingBranches;
  SmallVector<const ExtractValueInst *, 2> Results;

  for (const User *U : WO->users()) {
    if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      assert(EVI->getNumIndices() == 1 && "Obvious from WO's type");

      if (EVI->getIndices()[0] == 0)
        Results.push_back(EVI);
      else {
        assert(EVI->getIndices()[0] == 1 && "Obvious from WO's type");

        for (const auto *U : EVI->users())
          if (const auto *B = dyn_cast<BranchInst>(U)) {
            assert(B->isConditional() && "How else is it using an i1?");
            GuardingBranches.push_back(B);
          }
      }
    } else {
      // We are using the aggregate directly in a way we don't want to analyze
      // here (storing it to a global, say).
      return false;
    }
  }

  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
    if (!NoWrapEdge.isSingleEdge())
      return false;

    // Check if all users of the add are provably no-wrap.
    for (const auto *Result : Results) {
      // If the extractvalue itself is not executed on overflow, then we don't
      // need to check each use separately, since domination is transitive.
      if (DT.dominates(NoWrapEdge, Result->getParent()))
        continue;

      for (auto &RU : Result->uses())
        if (!DT.dominates(NoWrapEdge, RU))
          return false;
    }

    return true;
  };

  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
}

bool llvm::canCreatePoison(const Instruction *I) {
  // See whether I has flags that may create poison
  if (isa<OverflowingBinaryOperator>(I) &&
      (I->hasNoSignedWrap() || I->hasNoUnsignedWrap()))
    return true;
  if (isa<PossiblyExactOperator>(I) && I->isExact())
    return true;
  if (auto *FP = dyn_cast<FPMathOperator>(I)) {
    auto FMF = FP->getFastMathFlags();
    if (FMF.noNaNs() || FMF.noInfs())
      return true;
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->isInBounds())
      return true;

  unsigned Opcode = I->getOpcode();

  // Check whether opcode is a poison-generating operation
  switch (Opcode) {
  case Instruction::Shl:
  case Instruction::AShr:
  case Instruction::LShr: {
    // Shifts return poison if the shift amount is equal to or larger than
    // the bitwidth.
    if (auto *C = dyn_cast<Constant>(I->getOperand(1))) {
      SmallVector<Constant *, 4> ShiftAmounts;
      if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
        unsigned NumElts = FVTy->getNumElements();
        for (unsigned i = 0; i < NumElts; ++i)
          ShiftAmounts.push_back(C->getAggregateElement(i));
      } else if (isa<ScalableVectorType>(C->getType()))
        return true; // Can't tell, just return true to be safe
      else
        ShiftAmounts.push_back(C);

      bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
        auto *CI = dyn_cast<ConstantInt>(C);
        return CI && CI->getZExtValue() < C->getType()->getIntegerBitWidth();
      });
      return !Safe;
    }
    return true;
  }
  case Instruction::FPToSI:
  case Instruction::FPToUI:
    // fptosi/ui yields poison if the resulting value does not fit in the
    // destination type.
    return true;
  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke:
    // Function calls can return a poison value even if args are non-poison
    // values.
    return true;
  case Instruction::InsertElement:
  case Instruction::ExtractElement: {
    // If the index is out of the vector's bounds, the result is poison.
    auto *VTy = cast<VectorType>(I->getOperand(0)->getType());
    unsigned IdxOp = I->getOpcode() == Instruction::InsertElement ? 2 : 1;
    auto *Idx = dyn_cast<ConstantInt>(I->getOperand(IdxOp));
    if (!Idx || Idx->getZExtValue() >= VTy->getElementCount().Min)
      return true;
    return false;
  }
  case Instruction::FNeg:
  case Instruction::PHI:
  case Instruction::Select:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::ShuffleVector:
  case Instruction::ExtractValue:
  case Instruction::InsertValue:
  case Instruction::Freeze:
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::GetElementPtr:
    return false;
  default:
    if (isa<CastInst>(I))
      return false;
    else if (isa<BinaryOperator>(I))
      return false;
    // Be conservative and return true.
    return true;
  }
}

bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                            const Instruction *CtxI,
                                            const DominatorTree *DT,
                                            unsigned Depth) {
  if (Depth >= MaxDepth)
    return false;

  // If the value is a freeze instruction, then it can never
  // be undef or poison.
  if (isa<FreezeInst>(V))
    return true;
  // TODO: Some instructions are guaranteed to return neither undef
  // nor poison if their arguments are not poison/undef.

  if (auto *C = dyn_cast<Constant>(V)) {
    // TODO: We can analyze ConstExpr by opcode to determine if there is any
    // possibility of poison.
    if (isa<UndefValue>(C) || isa<ConstantExpr>(C))
      return false;

    if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
        isa<ConstantPointerNull>(C) || isa<Function>(C))
      return true;

    if (C->getType()->isVectorTy())
      return !C->containsUndefElement() && !C->containsConstantExpression();

    // TODO: Recursively analyze aggregates or other constants.
    return false;
  }

  // Strip cast operations from a pointer value.
  // Note that stripPointerCastsSameRepresentation can strip off getelementptr
  // inbounds with zero offset. To guarantee that the result isn't poison, the
  // stripped pointer is checked: it has to point into an allocated object or
  // be `null`, which ensures that `inbounds` getelementptrs with a zero
  // offset could not have produced poison.
  // stripPointerCastsSameRepresentation can also strip off addrspacecasts
  // that do not change the bit representation; such addrspacecasts are
  // effectively no-ops.
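  //
  // A small sketch of what stripping can do (hypothetical IR; @g is an
  // illustrative global):
  //   %p = getelementptr inbounds i8, i8* @g, i64 0
  //   %q = bitcast i8* %p to i32*
  // Stripping reduces %q to @g. Since @g is a GlobalVariable, it points into
  // an allocated object, so the zero-offset inbounds GEP could not have
  // produced poison.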
  auto *StrippedV = V->stripPointerCastsSameRepresentation();
  if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
      isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
    return true;

  auto OpCheck = [&](const Value *V) {
    return isGuaranteedNotToBeUndefOrPoison(V, CtxI, DT, Depth + 1);
  };

  if (auto *I = dyn_cast<Instruction>(V)) {
    switch (I->getOpcode()) {
    case Instruction::GetElementPtr: {
      auto *GEPI = dyn_cast<GetElementPtrInst>(I);
      if (!GEPI->isInBounds() && llvm::all_of(GEPI->operands(), OpCheck))
        return true;
      break;
    }
    case Instruction::FCmp: {
      auto *FI = dyn_cast<FCmpInst>(I);
      if (FI->getFastMathFlags().none() &&
          llvm::all_of(FI->operands(), OpCheck))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::PHI:
    case Instruction::ICmp:
      if (llvm::all_of(I->operands(), OpCheck))
        return true;
      break;
    default:
      break;
    }

    if (programUndefinedIfPoison(I) && I->getType()->isIntegerTy(1))
      // Note: once we have an agreement that poison is a value-wise concept,
      // we can remove the isIntegerTy(1) constraint.
      return true;
  }

  // CtxI may be null or a cloned instruction.
  if (!CtxI || !CtxI->getParent() || !DT)
    return false;

  auto *DNode = DT->getNode(CtxI->getParent());
  if (!DNode)
    // Unreachable block
    return false;

  // If V is used as a branch condition before reaching CtxI, V cannot be
  // undef or poison.
  //   br V, BB1, BB2
  //   BB1:
  //     CtxI ; V cannot be undef or poison here
  auto *Dominator = DNode->getIDom();
  while (Dominator) {
    auto *TI = Dominator->getBlock()->getTerminator();

    if (auto BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && BI->getCondition() == V)
        return true;
    } else if (auto SI = dyn_cast<SwitchInst>(TI)) {
      if (SI->getCondition() == V)
        return true;
    }

    Dominator = Dominator->getIDom();
  }

  return false;
}

OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, DL, AC, CxtI, DT);
}

OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
                                                 const Value *RHS,
                                                 const DataLayout &DL,
                                                 AssumptionCache *AC,
                                                 const Instruction *CxtI,
                                                 const DominatorTree *DT) {
  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
  // Note: An atomic operation isn't guaranteed to return in a reasonable
  // amount of time because it's possible for another thread to interfere
  // with it for an arbitrary length of time, but programs aren't allowed to
  // rely on that.

  // If there is no successor, then execution can't transfer to it.
  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
    return !CRI->unwindsToCaller();
  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
    return !CatchSwitch->unwindsToCaller();
  if (isa<ResumeInst>(I))
    return false;
  if (isa<ReturnInst>(I))
    return false;
  if (isa<UnreachableInst>(I))
    return false;

  // Calls can throw, or contain an infinite loop, or kill the process.
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    // Call sites that throw have implicit non-local control flow.
    if (!CB->doesNotThrow())
      return false;

    // A function which doesn't throw and has the "willreturn" attribute will
    // always return.
    if (CB->hasFnAttr(Attribute::WillReturn))
      return true;

    // Non-throwing call sites can loop infinitely, call exit/pthread_exit
    // etc. and thus not return. However, LLVM already assumes that
    //
    //  - Thread exiting actions are modeled as writes to memory invisible to
    //    the program.
    //
    //  - Loops that don't have side effects (side effects are volatile/atomic
    //    stores and IO) always terminate (see http://llvm.org/PR965).
    //    Furthermore, IO itself is also modeled as writes to memory invisible
    //    to the program.
    //
    // We rely on those assumptions here, and use the memory effects of the
    // call target as a proxy for checking that it always returns.

    // FIXME: This isn't aggressive enough; a call which only writes to a
    // global is guaranteed to return.
    return CB->onlyReadsMemory() || CB->onlyAccessesArgMemory();
  }

  // Other instructions return normally.
  return true;
}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
  // TODO: This is slightly conservative for invoke instructions since exiting
  // via an exception *is* normal control flow for them.
  for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
    if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
      return false;
  return true;
}

bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
  // The loop header is guaranteed to be executed for every iteration.
  //
  // FIXME: Relax this constraint to cover all basic blocks that are
  // guaranteed to be executed at every iteration.
  if (I->getParent() != L->getHeader()) return false;

  for (const Instruction &LI : *L->getHeader()) {
    if (&LI == I) return true;
    if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
  }
  llvm_unreachable("Instruction not contained in its own parent basic block.");
}

bool llvm::propagatesPoison(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Freeze:
  case Instruction::Select:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Invoke:
    return false;
  case Instruction::ICmp:
  case Instruction::FCmp:
  case Instruction::GetElementPtr:
    return true;
  default:
    if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
      return true;

    // Be conservative and return false.
4971 return false; 4972 } 4973 } 4974 4975 const Value *llvm::getGuaranteedNonPoisonOp(const Instruction *I) { 4976 switch (I->getOpcode()) { 4977 case Instruction::Store: 4978 return cast<StoreInst>(I)->getPointerOperand(); 4979 4980 case Instruction::Load: 4981 return cast<LoadInst>(I)->getPointerOperand(); 4982 4983 case Instruction::AtomicCmpXchg: 4984 return cast<AtomicCmpXchgInst>(I)->getPointerOperand(); 4985 4986 case Instruction::AtomicRMW: 4987 return cast<AtomicRMWInst>(I)->getPointerOperand(); 4988 4989 case Instruction::UDiv: 4990 case Instruction::SDiv: 4991 case Instruction::URem: 4992 case Instruction::SRem: 4993 return I->getOperand(1); 4994 4995 case Instruction::Call: 4996 if (auto *II = dyn_cast<IntrinsicInst>(I)) { 4997 switch (II->getIntrinsicID()) { 4998 case Intrinsic::assume: 4999 return II->getArgOperand(0); 5000 default: 5001 return nullptr; 5002 } 5003 } 5004 return nullptr; 5005 5006 default: 5007 return nullptr; 5008 } 5009 } 5010 5011 bool llvm::mustTriggerUB(const Instruction *I, 5012 const SmallSet<const Value *, 16>& KnownPoison) { 5013 auto *NotPoison = getGuaranteedNonPoisonOp(I); 5014 return (NotPoison && KnownPoison.count(NotPoison)); 5015 } 5016 5017 5018 bool llvm::programUndefinedIfPoison(const Instruction *PoisonI) { 5019 // We currently only look for uses of poison values within the same basic 5020 // block, as that makes it easier to guarantee that the uses will be 5021 // executed given that PoisonI is executed. 5022 // 5023 // FIXME: Expand this to consider uses beyond the same basic block. To do 5024 // this, look out for the distinction between post-dominance and strong 5025 // post-dominance. 5026 const BasicBlock *BB = PoisonI->getParent(); 5027 5028 // Set of instructions that we have proved will yield poison if PoisonI 5029 // does. 5030 SmallSet<const Value *, 16> YieldsPoison; 5031 SmallSet<const BasicBlock *, 4> Visited; 5032 YieldsPoison.insert(PoisonI); 5033 Visited.insert(PoisonI->getParent()); 5034 5035 BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end(); 5036 5037 unsigned Iter = 0; 5038 while (Iter++ < MaxDepth) { 5039 for (auto &I : make_range(Begin, End)) { 5040 if (&I != PoisonI) { 5041 if (mustTriggerUB(&I, YieldsPoison)) 5042 return true; 5043 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 5044 return false; 5045 } 5046 5047 // Mark poison that propagates from I through uses of I. 
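      // For example (hypothetical IR): if %p is already in YieldsPoison and
      // this block contains
      //   %q = add i32 %p, 1
      //   %r = icmp eq i32 %q, 0
      // then %q is inserted when I == %p, and %r when I == %q, since add and
      // icmp both propagate poison.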
      if (YieldsPoison.count(&I)) {
        for (const User *User : I.users()) {
          const Instruction *UserI = cast<Instruction>(User);
          if (propagatesPoison(UserI))
            YieldsPoison.insert(User);
        }
      }
    }

    if (auto *NextBB = BB->getSingleSuccessor()) {
      if (Visited.insert(NextBB).second) {
        BB = NextBB;
        Begin = BB->getFirstNonPHI()->getIterator();
        End = BB->end();
        continue;
      }
    }

    break;
  }
  return false;
}

static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isNaN())
        return false;
    }
    return true;
  }

  if (isa<ConstantAggregateZero>(V))
    return true;

  return false;
}

static bool isKnownNonZero(const Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isZero())
        return false;
    }
    return true;
  }

  return false;
}

/// Match clamp pattern for float types without caring about NaNs or signed
/// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
/// function recognizes if it can be substituted by a "canonical" min/max
/// pattern.
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TrueVal, Value *FalseVal,
                                               Value *&LHS, Value *&RHS) {
  // Try to match
  //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
  //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
  // and return the description of the outer Max/Min.

  // First, check if select has inverse order:
  if (CmpRHS == FalseVal) {
    std::swap(TrueVal, FalseVal);
    Pred = CmpInst::getInversePredicate(Pred);
  }

  // Assume success now. If there's no match, callers should not use these
  // anyway.
  LHS = TrueVal;
  RHS = FalseVal;

  const APFloat *FC1;
  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
    return {SPF_UNKNOWN, SPNB_NA, false};

  const APFloat *FC2;
  switch (Pred) {
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_ULT:
  case CmpInst::FCMP_ULE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        *FC1 < *FC2)
      return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
    break;
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE:
  case CmpInst::FCMP_UGT:
  case CmpInst::FCMP_UGE:
    if (match(FalseVal,
              m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
                          m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
        *FC1 > *FC2)
      return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
    break;
  default:
    break;
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

/// Recognize variations of:
///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ?
(h) : (v))) 5169 static SelectPatternResult matchClamp(CmpInst::Predicate Pred, 5170 Value *CmpLHS, Value *CmpRHS, 5171 Value *TrueVal, Value *FalseVal) { 5172 // Swap the select operands and predicate to match the patterns below. 5173 if (CmpRHS != TrueVal) { 5174 Pred = ICmpInst::getSwappedPredicate(Pred); 5175 std::swap(TrueVal, FalseVal); 5176 } 5177 const APInt *C1; 5178 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 5179 const APInt *C2; 5180 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 5181 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 5182 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 5183 return {SPF_SMAX, SPNB_NA, false}; 5184 5185 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 5186 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 5187 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 5188 return {SPF_SMIN, SPNB_NA, false}; 5189 5190 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 5191 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 5192 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 5193 return {SPF_UMAX, SPNB_NA, false}; 5194 5195 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 5196 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 5197 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 5198 return {SPF_UMIN, SPNB_NA, false}; 5199 } 5200 return {SPF_UNKNOWN, SPNB_NA, false}; 5201 } 5202 5203 /// Recognize variations of: 5204 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) 5205 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, 5206 Value *CmpLHS, Value *CmpRHS, 5207 Value *TVal, Value *FVal, 5208 unsigned Depth) { 5209 // TODO: Allow FP min/max with nnan/nsz. 5210 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); 5211 5212 Value *A = nullptr, *B = nullptr; 5213 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); 5214 if (!SelectPatternResult::isMinOrMax(L.Flavor)) 5215 return {SPF_UNKNOWN, SPNB_NA, false}; 5216 5217 Value *C = nullptr, *D = nullptr; 5218 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); 5219 if (L.Flavor != R.Flavor) 5220 return {SPF_UNKNOWN, SPNB_NA, false}; 5221 5222 // We have something like: x Pred y ? min(a, b) : min(c, d). 5223 // Try to match the compare to the min/max operations of the select operands. 5224 // First, make sure we have the right compare predicate. 
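  // For instance (illustrative only): when L.Flavor is SPF_SMIN, a compare
  // written as "icmp sgt %a, %c" is canonicalized by swapping its operands
  // and predicate into "icmp slt %c, %a", so the matching below only has to
  // handle the slt/sle forms.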
5225 switch (L.Flavor) { 5226 case SPF_SMIN: 5227 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { 5228 Pred = ICmpInst::getSwappedPredicate(Pred); 5229 std::swap(CmpLHS, CmpRHS); 5230 } 5231 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 5232 break; 5233 return {SPF_UNKNOWN, SPNB_NA, false}; 5234 case SPF_SMAX: 5235 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { 5236 Pred = ICmpInst::getSwappedPredicate(Pred); 5237 std::swap(CmpLHS, CmpRHS); 5238 } 5239 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 5240 break; 5241 return {SPF_UNKNOWN, SPNB_NA, false}; 5242 case SPF_UMIN: 5243 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { 5244 Pred = ICmpInst::getSwappedPredicate(Pred); 5245 std::swap(CmpLHS, CmpRHS); 5246 } 5247 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 5248 break; 5249 return {SPF_UNKNOWN, SPNB_NA, false}; 5250 case SPF_UMAX: 5251 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { 5252 Pred = ICmpInst::getSwappedPredicate(Pred); 5253 std::swap(CmpLHS, CmpRHS); 5254 } 5255 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) 5256 break; 5257 return {SPF_UNKNOWN, SPNB_NA, false}; 5258 default: 5259 return {SPF_UNKNOWN, SPNB_NA, false}; 5260 } 5261 5262 // If there is a common operand in the already matched min/max and the other 5263 // min/max operands match the compare operands (either directly or inverted), 5264 // then this is min/max of the same flavor. 5265 5266 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 5267 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 5268 if (D == B) { 5269 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 5270 match(A, m_Not(m_Specific(CmpRHS))))) 5271 return {L.Flavor, SPNB_NA, false}; 5272 } 5273 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 5274 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 5275 if (C == B) { 5276 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 5277 match(A, m_Not(m_Specific(CmpRHS))))) 5278 return {L.Flavor, SPNB_NA, false}; 5279 } 5280 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 5281 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 5282 if (D == A) { 5283 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 5284 match(B, m_Not(m_Specific(CmpRHS))))) 5285 return {L.Flavor, SPNB_NA, false}; 5286 } 5287 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 5288 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 5289 if (C == A) { 5290 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 5291 match(B, m_Not(m_Specific(CmpRHS))))) 5292 return {L.Flavor, SPNB_NA, false}; 5293 } 5294 5295 return {SPF_UNKNOWN, SPNB_NA, false}; 5296 } 5297 5298 /// If the input value is the result of a 'not' op, constant integer, or vector 5299 /// splat of a constant integer, return the bitwise-not source value. 5300 /// TODO: This could be extended to handle non-splat vector integer constants. 5301 static Value *getNotValue(Value *V) { 5302 Value *NotV; 5303 if (match(V, m_Not(m_Value(NotV)))) 5304 return NotV; 5305 5306 const APInt *C; 5307 if (match(V, m_APInt(C))) 5308 return ConstantInt::get(V->getType(), ~(*C)); 5309 5310 return nullptr; 5311 } 5312 5313 /// Match non-obvious integer minimum and maximum sequences. 
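/// For example (illustrative only): "(X <s 0) ? X : SINT_MAX" selects X
/// exactly when X >u SINT_MAX, so it is recognized as UMAX(X, SINT_MAX).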
5314 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, 5315 Value *CmpLHS, Value *CmpRHS, 5316 Value *TrueVal, Value *FalseVal, 5317 Value *&LHS, Value *&RHS, 5318 unsigned Depth) { 5319 // Assume success. If there's no match, callers should not use these anyway. 5320 LHS = TrueVal; 5321 RHS = FalseVal; 5322 5323 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal); 5324 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 5325 return SPR; 5326 5327 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth); 5328 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 5329 return SPR; 5330 5331 // Look through 'not' ops to find disguised min/max. 5332 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y) 5333 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y) 5334 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) { 5335 switch (Pred) { 5336 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false}; 5337 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false}; 5338 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false}; 5339 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false}; 5340 default: break; 5341 } 5342 } 5343 5344 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X) 5345 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X) 5346 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) { 5347 switch (Pred) { 5348 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false}; 5349 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false}; 5350 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false}; 5351 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false}; 5352 default: break; 5353 } 5354 } 5355 5356 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) 5357 return {SPF_UNKNOWN, SPNB_NA, false}; 5358 5359 // Z = X -nsw Y 5360 // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0) 5361 // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0) 5362 if (match(TrueVal, m_Zero()) && 5363 match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 5364 return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false}; 5365 5366 // Z = X -nsw Y 5367 // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0) 5368 // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0) 5369 if (match(FalseVal, m_Zero()) && 5370 match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS)))) 5371 return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false}; 5372 5373 const APInt *C1; 5374 if (!match(CmpRHS, m_APInt(C1))) 5375 return {SPF_UNKNOWN, SPNB_NA, false}; 5376 5377 // An unsigned min/max can be written with a signed compare. 5378 const APInt *C2; 5379 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || 5380 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { 5381 // Is the sign bit set? 5382 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX 5383 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN 5384 if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() && 5385 C2->isMaxSignedValue()) 5386 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 5387 5388 // Is the sign bit clear? 5389 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX 5390 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN 5391 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() && 5392 C2->isMinSignedValue()) 5393 return {CmpLHS == FalseVal ? 
               SPF_UMAX : SPF_UMIN, SPNB_NA, false};
  }

  return {SPF_UNKNOWN, SPNB_NA, false};
}

bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
  assert(X && Y && "Invalid operand");

  // X = sub (0, Y) || X = sub nsw (0, Y)
  if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
      (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
    return true;

  // Y = sub (0, X) || Y = sub nsw (0, X)
  if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
      (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
    return true;

  // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
  Value *A, *B;
  return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
                       match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
         (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
                      match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
}

static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS,
                                              unsigned Depth) {
  if (CmpInst::isFPPredicate(Pred)) {
    // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has
    // one 0.0 operand, set the compare's 0.0 operands to that same value for
    // the purpose of identifying min/max. Disregard vector constants with
    // undefined elements because those cannot be back-propagated for
    // analysis.
    Value *OutputZeroVal = nullptr;
    if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
        !cast<Constant>(TrueVal)->containsUndefElement())
      OutputZeroVal = TrueVal;
    else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
             !cast<Constant>(FalseVal)->containsUndefElement())
      OutputZeroVal = FalseVal;

    if (OutputZeroVal) {
      if (match(CmpLHS, m_AnyZeroFP()))
        CmpLHS = OutputZeroVal;
      if (match(CmpRHS, m_AnyZeroFP()))
        CmpRHS = OutputZeroVal;
    }
  }

  LHS = CmpLHS;
  RHS = CmpRHS;

  // Signed zero may return inconsistent results between implementations.
  //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // Therefore, we behave conservatively and only proceed if at least one of
  // the operands is known to not be zero or if we don't care about signed
  // zero.
  switch (Pred) {
  default: break;
  // FIXME: Include OGT/OLT/UGT/ULT.
  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
    if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
        !isKnownNonZero(CmpRHS))
      return {SPF_UNKNOWN, SPNB_NA, false};
  }

  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
  bool Ordered = false;

  // When given one NaN and one non-NaN input:
  //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //     ordered comparison fails), which could be NaN or non-NaN.
  // So here we discover exactly what NaN behavior is required/accepted.
  if (CmpInst::isFPPredicate(Pred)) {
    bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
    bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);

    if (LHSSafe && RHSSafe) {
      // Both operands are known non-NaN.
5479 NaNBehavior = SPNB_RETURNS_ANY; 5480 } else if (CmpInst::isOrdered(Pred)) { 5481 // An ordered comparison will return false when given a NaN, so it 5482 // returns the RHS. 5483 Ordered = true; 5484 if (LHSSafe) 5485 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 5486 NaNBehavior = SPNB_RETURNS_NAN; 5487 else if (RHSSafe) 5488 NaNBehavior = SPNB_RETURNS_OTHER; 5489 else 5490 // Completely unsafe. 5491 return {SPF_UNKNOWN, SPNB_NA, false}; 5492 } else { 5493 Ordered = false; 5494 // An unordered comparison will return true when given a NaN, so it 5495 // returns the LHS. 5496 if (LHSSafe) 5497 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 5498 NaNBehavior = SPNB_RETURNS_OTHER; 5499 else if (RHSSafe) 5500 NaNBehavior = SPNB_RETURNS_NAN; 5501 else 5502 // Completely unsafe. 5503 return {SPF_UNKNOWN, SPNB_NA, false}; 5504 } 5505 } 5506 5507 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 5508 std::swap(CmpLHS, CmpRHS); 5509 Pred = CmpInst::getSwappedPredicate(Pred); 5510 if (NaNBehavior == SPNB_RETURNS_NAN) 5511 NaNBehavior = SPNB_RETURNS_OTHER; 5512 else if (NaNBehavior == SPNB_RETURNS_OTHER) 5513 NaNBehavior = SPNB_RETURNS_NAN; 5514 Ordered = !Ordered; 5515 } 5516 5517 // ([if]cmp X, Y) ? X : Y 5518 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 5519 switch (Pred) { 5520 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 5521 case ICmpInst::ICMP_UGT: 5522 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; 5523 case ICmpInst::ICMP_SGT: 5524 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; 5525 case ICmpInst::ICMP_ULT: 5526 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; 5527 case ICmpInst::ICMP_SLT: 5528 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; 5529 case FCmpInst::FCMP_UGT: 5530 case FCmpInst::FCMP_UGE: 5531 case FCmpInst::FCMP_OGT: 5532 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; 5533 case FCmpInst::FCMP_ULT: 5534 case FCmpInst::FCMP_ULE: 5535 case FCmpInst::FCMP_OLT: 5536 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; 5537 } 5538 } 5539 5540 if (isKnownNegation(TrueVal, FalseVal)) { 5541 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can 5542 // match against either LHS or sext(LHS). 5543 auto MaybeSExtCmpLHS = 5544 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS))); 5545 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes()); 5546 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One()); 5547 if (match(TrueVal, MaybeSExtCmpLHS)) { 5548 // Set the return values. If the compare uses the negated value (-X >s 0), 5549 // swap the return values because the negated value is always 'RHS'. 5550 LHS = TrueVal; 5551 RHS = FalseVal; 5552 if (match(CmpLHS, m_Neg(m_Specific(FalseVal)))) 5553 std::swap(LHS, RHS); 5554 5555 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X) 5556 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X) 5557 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes)) 5558 return {SPF_ABS, SPNB_NA, false}; 5559 5560 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X) 5561 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne)) 5562 return {SPF_ABS, SPNB_NA, false}; 5563 5564 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X) 5565 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X) 5566 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne)) 5567 return {SPF_NABS, SPNB_NA, false}; 5568 } 5569 else if (match(FalseVal, MaybeSExtCmpLHS)) { 5570 // Set the return values. 
    // If the compare uses the negated value (-X >s 0),
    // swap the return values because the negated value is always 'RHS'.
    LHS = FalseVal;
    RHS = TrueVal;
    if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
      std::swap(LHS, RHS);

    // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
    // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
    if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
      return {SPF_NABS, SPNB_NA, false};

    // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
    // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
    if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
      return {SPF_ABS, SPNB_NA, false};
  }
}

  if (CmpInst::isIntPredicate(Pred))
    return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS,
                       Depth);

  // According to IEEE 754-2008 5.3.1, minNum(0.0, -0.0) and similar may
  // return either -0.0 or 0.0, so the fcmp/select pair has stricter
  // semantics than minNum. Be conservative in such cases.
  if (NaNBehavior != SPNB_RETURNS_ANY ||
      (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
       !isKnownNonZero(CmpRHS)))
    return {SPF_UNKNOWN, SPNB_NA, false};

  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
}

/// Helps to match a select pattern in case of a type mismatch.
///
/// The function processes the case when the types of the true and false
/// values of a select instruction differ from the type of the cmp
/// instruction's operands because of a cast instruction. The function checks
/// if it is legal to move the cast operation after the "select". If yes, it
/// returns the new second value of the "select" (with the assumption that the
/// cast is moved):
/// 1. As the operand of the cast instruction when both values of the "select"
///    are the same cast instruction.
/// 2. As a restored constant (by applying the reverse cast operation) when
///    the first value of the "select" is a cast operation and the second
///    value is a constant.
/// NOTE: We return only the new second value because the first value can be
/// accessed as the operand of the cast instruction.
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {
  auto *Cast1 = dyn_cast<CastInst>(V1);
  if (!Cast1)
    return nullptr;

  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
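    // e.g. (hypothetical values): with V1 = "sext i8 %a to i32" and
    // V2 = "sext i8 %b to i32", we return %b; the caller can then rebuild
    // the select on i8 and sink the sext past it.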
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }

  auto *C = dyn_cast<Constant>(V2);
  if (!C)
    return nullptr;

  Constant *CastedTo = nullptr;
  switch (*CastOp) {
  case Instruction::ZExt:
    if (CmpI->isUnsigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy);
    break;
  case Instruction::SExt:
    if (CmpI->isSigned())
      CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
    break;
  case Instruction::Trunc:
    Constant *CmpConst;
    if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
        CmpConst->getType() == SrcTy) {
      // Here we have the following case:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %tr = trunc iN %x to iK
      //   %narrowsel = select i1 %cond, iK %tr, iK C
      //
      // We can always move the trunc after the select operation:
      //
      //   %cond = cmp iN %x, CmpConst
      //   %widesel = select i1 %cond, iN %x, iN CmpConst
      //   %tr = trunc iN %widesel to iK
      //
      // Note that C could be extended in any way because we don't care about
      // upper bits after truncation. It can't be an abs pattern, because that
      // would look like:
      //
      //   select i1 %cond, x, -x.
      //
      // So only a min/max pattern can be matched. Such a match requires the
      // widened C to equal CmpConst; that is why we set the widened C to
      // CmpConst here, and the condition trunc(CmpConst) == C is checked
      // below.
      CastedTo = CmpConst;
    } else {
      CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
    }
    break;
  case Instruction::FPTrunc:
    CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
    break;
  case Instruction::FPExt:
    CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
    break;
  case Instruction::FPToUI:
    CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
    break;
  case Instruction::FPToSI:
    CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
    break;
  case Instruction::UIToFP:
    CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
    break;
  case Instruction::SIToFP:
    CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
    break;
  default:
    break;
  }

  if (!CastedTo)
    return nullptr;

  // Make sure the cast doesn't lose any information.
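  // Worked example (hypothetical values): reversing "zext i8 to i32" against
  // C = 300 gives trunc(300) = 44, but casting 44 back yields 44 != 300, so
  // the candidate is rejected below.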
5702 Constant *CastedBack = 5703 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true); 5704 if (CastedBack != C) 5705 return nullptr; 5706 5707 return CastedTo; 5708 } 5709 5710 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, 5711 Instruction::CastOps *CastOp, 5712 unsigned Depth) { 5713 if (Depth >= MaxDepth) 5714 return {SPF_UNKNOWN, SPNB_NA, false}; 5715 5716 SelectInst *SI = dyn_cast<SelectInst>(V); 5717 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false}; 5718 5719 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); 5720 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false}; 5721 5722 Value *TrueVal = SI->getTrueValue(); 5723 Value *FalseVal = SI->getFalseValue(); 5724 5725 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS, 5726 CastOp, Depth); 5727 } 5728 5729 SelectPatternResult llvm::matchDecomposedSelectPattern( 5730 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, 5731 Instruction::CastOps *CastOp, unsigned Depth) { 5732 CmpInst::Predicate Pred = CmpI->getPredicate(); 5733 Value *CmpLHS = CmpI->getOperand(0); 5734 Value *CmpRHS = CmpI->getOperand(1); 5735 FastMathFlags FMF; 5736 if (isa<FPMathOperator>(CmpI)) 5737 FMF = CmpI->getFastMathFlags(); 5738 5739 // Bail out early. 5740 if (CmpI->isEquality()) 5741 return {SPF_UNKNOWN, SPNB_NA, false}; 5742 5743 // Deal with type mismatches. 5744 if (CastOp && CmpLHS->getType() != TrueVal->getType()) { 5745 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) { 5746 // If this is a potential fmin/fmax with a cast to integer, then ignore 5747 // -0.0 because there is no corresponding integer value. 5748 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) 5749 FMF.setNoSignedZeros(); 5750 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 5751 cast<CastInst>(TrueVal)->getOperand(0), C, 5752 LHS, RHS, Depth); 5753 } 5754 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) { 5755 // If this is a potential fmin/fmax with a cast to integer, then ignore 5756 // -0.0 because there is no corresponding integer value. 5757 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) 5758 FMF.setNoSignedZeros(); 5759 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 5760 C, cast<CastInst>(FalseVal)->getOperand(0), 5761 LHS, RHS, Depth); 5762 } 5763 } 5764 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, 5765 LHS, RHS, Depth); 5766 } 5767 5768 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) { 5769 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT; 5770 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT; 5771 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT; 5772 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT; 5773 if (SPF == SPF_FMINNUM) 5774 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; 5775 if (SPF == SPF_FMAXNUM) 5776 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; 5777 llvm_unreachable("unhandled!"); 5778 } 5779 5780 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) { 5781 if (SPF == SPF_SMIN) return SPF_SMAX; 5782 if (SPF == SPF_UMIN) return SPF_UMAX; 5783 if (SPF == SPF_SMAX) return SPF_SMIN; 5784 if (SPF == SPF_UMAX) return SPF_UMIN; 5785 llvm_unreachable("unhandled!"); 5786 } 5787 5788 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) { 5789 return getMinMaxPred(getInverseMinMaxFlavor(SPF)); 5790 } 5791 5792 /// Return true if "icmp Pred LHS RHS" is always true. 
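/// For example, "icmp ule %x, (add nuw %x, 5)" always holds, because the
/// nuw flag rules out unsigned wrap (values chosen for illustration).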
5793 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, 5794 const Value *RHS, const DataLayout &DL, 5795 unsigned Depth) { 5796 assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!"); 5797 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS) 5798 return true; 5799 5800 switch (Pred) { 5801 default: 5802 return false; 5803 5804 case CmpInst::ICMP_SLE: { 5805 const APInt *C; 5806 5807 // LHS s<= LHS +_{nsw} C if C >= 0 5808 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C)))) 5809 return !C->isNegative(); 5810 return false; 5811 } 5812 5813 case CmpInst::ICMP_ULE: { 5814 const APInt *C; 5815 5816 // LHS u<= LHS +_{nuw} C for any C 5817 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C)))) 5818 return true; 5819 5820 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB) 5821 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B, 5822 const Value *&X, 5823 const APInt *&CA, const APInt *&CB) { 5824 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) && 5825 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB)))) 5826 return true; 5827 5828 // If X & C == 0 then (X | C) == X +_{nuw} C 5829 if (match(A, m_Or(m_Value(X), m_APInt(CA))) && 5830 match(B, m_Or(m_Specific(X), m_APInt(CB)))) { 5831 KnownBits Known(CA->getBitWidth()); 5832 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr, 5833 /*CxtI*/ nullptr, /*DT*/ nullptr); 5834 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero)) 5835 return true; 5836 } 5837 5838 return false; 5839 }; 5840 5841 const Value *X; 5842 const APInt *CLHS, *CRHS; 5843 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS)) 5844 return CLHS->ule(*CRHS); 5845 5846 return false; 5847 } 5848 } 5849 } 5850 5851 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred 5852 /// ALHS ARHS" is true. Otherwise, return None. 5853 static Optional<bool> 5854 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, 5855 const Value *ARHS, const Value *BLHS, const Value *BRHS, 5856 const DataLayout &DL, unsigned Depth) { 5857 switch (Pred) { 5858 default: 5859 return None; 5860 5861 case CmpInst::ICMP_SLT: 5862 case CmpInst::ICMP_SLE: 5863 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) && 5864 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth)) 5865 return true; 5866 return None; 5867 5868 case CmpInst::ICMP_ULT: 5869 case CmpInst::ICMP_ULE: 5870 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) && 5871 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth)) 5872 return true; 5873 return None; 5874 } 5875 } 5876 5877 /// Return true if the operands of the two compares match. IsSwappedOps is true 5878 /// when the operands match, but are swapped. 5879 static bool isMatchingOps(const Value *ALHS, const Value *ARHS, 5880 const Value *BLHS, const Value *BRHS, 5881 bool &IsSwappedOps) { 5882 5883 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS); 5884 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS); 5885 return IsMatchingOps || IsSwappedOps; 5886 } 5887 5888 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true. 5889 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false. 5890 /// Otherwise, return None if we can't infer anything. 5891 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred, 5892 CmpInst::Predicate BPred, 5893 bool AreSwappedOps) { 5894 // Canonicalize the predicate as if the operands were not commuted. 
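  // e.g. (illustrative): if A is "icmp ult X, Y" and B is "icmp ugt Y, X",
  // B's operands are swapped relative to A's, so BPred becomes ULT and both
  // compares are expressed over the same operand order.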
  if (AreSwappedOps)
    BPred = ICmpInst::getSwappedPredicate(BPred);

  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
    return true;
  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
    return false;

  return None;
}

/// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
/// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
/// Otherwise, return None if we can't infer anything.
static Optional<bool>
isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
                                 const ConstantInt *C1,
                                 CmpInst::Predicate BPred,
                                 const ConstantInt *C2) {
  ConstantRange DomCR =
      ConstantRange::makeExactICmpRegion(APred, C1->getValue());
  ConstantRange CR =
      ConstantRange::makeAllowedICmpRegion(BPred, C2->getValue());
  ConstantRange Intersection = DomCR.intersectWith(CR);
  ConstantRange Difference = DomCR.difference(CR);
  if (Intersection.isEmptySet())
    return false;
  if (Difference.isEmptySet())
    return true;
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything.
static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                         CmpInst::Predicate BPred,
                                         const Value *BLHS, const Value *BRHS,
                                         const DataLayout &DL, bool LHSIsTrue,
                                         unsigned Depth) {
  Value *ALHS = LHS->getOperand(0);
  Value *ARHS = LHS->getOperand(1);

  // The rest of the logic assumes the LHS condition is true. If that's not the
  // case, invert the predicate to make it so.
  CmpInst::Predicate APred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  // Can we infer anything when the two compares have matching operands?
  bool AreSwappedOps;
  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
    if (Optional<bool> Implication = isImpliedCondMatchingOperands(
            APred, BPred, AreSwappedOps))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  // Can we infer anything when the LHS operands match and the RHS operands are
  // constants (not necessarily matching)?
  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
    if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
            APred, cast<ConstantInt>(ARHS), BPred, cast<ConstantInt>(BRHS)))
      return Implication;
    // No amount of additional analysis will infer the second condition, so
    // early exit.
    return None;
  }

  if (APred == BPred)
    return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
  return None;
}

/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
/// false. Otherwise, return None if we can't infer anything. We expect the
/// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
static Optional<bool>
isImpliedCondAndOr(const BinaryOperator *LHS, CmpInst::Predicate RHSPred,
                   const Value *RHSOp0, const Value *RHSOp1,
                   const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // The LHS must be an 'or' or an 'and' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or) &&
         "Expected LHS to be 'and' or 'or'.");

  assert(Depth <= MaxDepth && "Hit recursion limit");

  // If the result of an 'or' is false, then we know both legs of the 'or' are
  // false. Similarly, if the result of an 'and' is true, then we know both
  // legs of the 'and' are true.
  Value *ALHS, *ARHS;
  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
      (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
    if (Optional<bool> Implication = isImpliedCondition(
            ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    if (Optional<bool> Implication = isImpliedCondition(
            ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
      return Implication;
    return None;
  }
  return None;
}

Optional<bool>
llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
                         const Value *RHSOp0, const Value *RHSOp1,
                         const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
  // Bail out when we hit the limit.
  if (Depth == MaxDepth)
    return None;

  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
  // example.
  if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
    return None;

  Type *OpTy = LHS->getType();
  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");

  // FIXME: Extend the code below to handle vectors.
  if (OpTy->isVectorTy())
    return None;

  assert(OpTy->isIntegerTy(1) && "implied by above");

  // Both LHS and RHS are icmps.
  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
  if (LHSCmp)
    return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                              Depth);

  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to
  // be an icmp. FIXME: Add support for and/or on the RHS.
  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
  if (LHSBO) {
    if ((LHSBO->getOpcode() == Instruction::And ||
         LHSBO->getOpcode() == Instruction::Or))
      return isImpliedCondAndOr(LHSBO, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
                                Depth);
  }
  return None;
}

Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                        const DataLayout &DL, bool LHSIsTrue,
                                        unsigned Depth) {
  // LHS ==> RHS by definition
  if (LHS == RHS)
    return LHSIsTrue;

  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
  if (RHSCmp)
    return isImpliedCondition(LHS, RHSCmp->getPredicate(),
                              RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
                              LHSIsTrue, Depth);
  return None;
}

// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
// condition dominating ContextI or nullptr, if no condition is found.
static std::pair<Value *, bool>
getDomPredecessorCondition(const Instruction *ContextI) {
  if (!ContextI || !ContextI->getParent())
    return {nullptr, false};

  // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (e.g., from a SimplifyQuery) instead?
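  //
  // The shape we look for, as a hypothetical IR sketch:
  //   pred:
  //     br i1 %cond, label %ctx, label %other
  //   ctx:                          ; ContextI's block; single predecessor
  //     ...                         ; %cond is known true here
  // In that case {%cond, true} is returned.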
6066 const BasicBlock *ContextBB = ContextI->getParent(); 6067 const BasicBlock *PredBB = ContextBB->getSinglePredecessor(); 6068 if (!PredBB) 6069 return {nullptr, false}; 6070 6071 // We need a conditional branch in the predecessor. 6072 Value *PredCond; 6073 BasicBlock *TrueBB, *FalseBB; 6074 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB))) 6075 return {nullptr, false}; 6076 6077 // The branch should get simplified. Don't bother simplifying this condition. 6078 if (TrueBB == FalseBB) 6079 return {nullptr, false}; 6080 6081 assert((TrueBB == ContextBB || FalseBB == ContextBB) && 6082 "Predecessor block does not point to successor?"); 6083 6084 // Is this condition implied by the predecessor condition? 6085 return {PredCond, TrueBB == ContextBB}; 6086 } 6087 6088 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond, 6089 const Instruction *ContextI, 6090 const DataLayout &DL) { 6091 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool"); 6092 auto PredCond = getDomPredecessorCondition(ContextI); 6093 if (PredCond.first) 6094 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second); 6095 return None; 6096 } 6097 6098 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred, 6099 const Value *LHS, const Value *RHS, 6100 const Instruction *ContextI, 6101 const DataLayout &DL) { 6102 auto PredCond = getDomPredecessorCondition(ContextI); 6103 if (PredCond.first) 6104 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL, 6105 PredCond.second); 6106 return None; 6107 } 6108 6109 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, 6110 APInt &Upper, const InstrInfoQuery &IIQ) { 6111 unsigned Width = Lower.getBitWidth(); 6112 const APInt *C; 6113 switch (BO.getOpcode()) { 6114 case Instruction::Add: 6115 if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) { 6116 // FIXME: If we have both nuw and nsw, we should reduce the range further. 6117 if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) { 6118 // 'add nuw x, C' produces [C, UINT_MAX]. 6119 Lower = *C; 6120 } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) { 6121 if (C->isNegative()) { 6122 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C]. 6123 Lower = APInt::getSignedMinValue(Width); 6124 Upper = APInt::getSignedMaxValue(Width) + *C + 1; 6125 } else { 6126 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX]. 6127 Lower = APInt::getSignedMinValue(Width) + *C; 6128 Upper = APInt::getSignedMaxValue(Width) + 1; 6129 } 6130 } 6131 } 6132 break; 6133 6134 case Instruction::And: 6135 if (match(BO.getOperand(1), m_APInt(C))) 6136 // 'and x, C' produces [0, C]. 6137 Upper = *C + 1; 6138 break; 6139 6140 case Instruction::Or: 6141 if (match(BO.getOperand(1), m_APInt(C))) 6142 // 'or x, C' produces [C, UINT_MAX]. 6143 Lower = *C; 6144 break; 6145 6146 case Instruction::AShr: 6147 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { 6148 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C]. 
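      // e.g. for i8 and C == 2 this is [-32, 31] (Upper is exclusive, 32).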
      Lower = APInt::getSignedMinValue(Width).ashr(*C);
      Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      unsigned ShiftAmount = Width - 1;
      if (!C->isNullValue() && IIQ.isExact(&BO))
        ShiftAmount = C->countTrailingZeros();
      if (C->isNegative()) {
        // 'ashr C, x' produces [C, C >> (Width-1)]
        Lower = *C;
        Upper = C->ashr(ShiftAmount) + 1;
      } else {
        // 'ashr C, x' produces [C >> (Width-1), C]
        Lower = C->ashr(ShiftAmount);
        Upper = *C + 1;
      }
    }
    break;

  case Instruction::LShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'lshr x, C' produces [0, UINT_MAX >> C].
      Upper = APInt::getAllOnesValue(Width).lshr(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'lshr C, x' produces [C >> (Width-1), C].
      unsigned ShiftAmount = Width - 1;
      if (!C->isNullValue() && IIQ.isExact(&BO))
        ShiftAmount = C->countTrailingZeros();
      Lower = C->lshr(ShiftAmount);
      Upper = *C + 1;
    }
    break;

  case Instruction::Shl:
    if (match(BO.getOperand(0), m_APInt(C))) {
      if (IIQ.hasNoUnsignedWrap(&BO)) {
        // 'shl nuw C, x' produces [C, C << CLZ(C)]
        Lower = *C;
        Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
      } else if (IIQ.hasNoSignedWrap(&BO)) { // TODO: What if both nuw+nsw?
        if (C->isNegative()) {
          // 'shl nsw C, x' produces [C << CLO(C)-1, C]
          unsigned ShiftAmount = C->countLeadingOnes() - 1;
          Lower = C->shl(ShiftAmount);
          Upper = *C + 1;
        } else {
          // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
          unsigned ShiftAmount = C->countLeadingZeros() - 1;
          Lower = *C;
          Upper = C->shl(ShiftAmount) + 1;
        }
      }
    }
    break;

  case Instruction::SDiv:
    if (match(BO.getOperand(1), m_APInt(C))) {
      APInt IntMin = APInt::getSignedMinValue(Width);
      APInt IntMax = APInt::getSignedMaxValue(Width);
      if (C->isAllOnesValue()) {
        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX], since INT_MIN / -1
        // itself overflows and so contributes no value.
        Lower = IntMin + 1;
        Upper = IntMax + 1;
      } else if (C->countLeadingZeros() < Width - 1) {
        // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
        //   where C is not -1, 0, or 1.
        Lower = IntMin.sdiv(*C);
        Upper = IntMax.sdiv(*C);
        if (Lower.sgt(Upper))
          std::swap(Lower, Upper);
        Upper = Upper + 1;
        assert(Upper != Lower && "Upper part of range has wrapped!");
      }
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      if (C->isMinSignedValue()) {
        // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
        Lower = *C;
        Upper = Lower.lshr(1) + 1;
      } else {
        // 'sdiv C, x' produces [-|C|, |C|].
        Upper = C->abs() + 1;
        Lower = (-Upper) + 1;
      }
    }
    break;

  case Instruction::UDiv:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
      // 'udiv x, C' produces [0, UINT_MAX / C].
      Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'udiv C, x' produces [0, C].
      Upper = *C + 1;
    }
    break;

  case Instruction::SRem:
    if (match(BO.getOperand(1), m_APInt(C))) {
      // 'srem x, C' produces (-|C|, |C|).
      Upper = C->abs();
      Lower = (-Upper) + 1;
    }
    break;

  case Instruction::URem:
    if (match(BO.getOperand(1), m_APInt(C)))
      // 'urem x, C' produces [0, C).
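      // e.g. 'urem x, 8' produces [0, 8), so Upper is set to 8.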
      Upper = *C;
    break;

  default:
    break;
  }
}

static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
                                  APInt &Upper) {
  unsigned Width = Lower.getBitWidth();
  const APInt *C;
  switch (II.getIntrinsicID()) {
  case Intrinsic::uadd_sat:
    // uadd.sat(x, C) produces [C, UINT_MAX].
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C)))
      Lower = *C;
    break;
  case Intrinsic::sadd_sat:
    if (match(II.getOperand(0), m_APInt(C)) ||
        match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative()) {
        // sadd.sat(x, C) with negative C produces [SINT_MIN, SINT_MAX + C].
        Lower = APInt::getSignedMinValue(Width);
        Upper = APInt::getSignedMaxValue(Width) + *C + 1;
      } else {
        // sadd.sat(x, C) with non-negative C produces
        // [SINT_MIN + C, SINT_MAX].
        Lower = APInt::getSignedMinValue(Width) + *C;
        Upper = APInt::getSignedMaxValue(Width) + 1;
      }
    }
    break;
  case Intrinsic::usub_sat:
    // usub.sat(C, x) produces [0, C].
    if (match(II.getOperand(0), m_APInt(C)))
      Upper = *C + 1;
    // usub.sat(x, C) produces [0, UINT_MAX - C].
    else if (match(II.getOperand(1), m_APInt(C)))
      Upper = APInt::getMaxValue(Width) - *C + 1;
    break;
  case Intrinsic::ssub_sat:
    if (match(II.getOperand(0), m_APInt(C))) {
      if (C->isNegative()) {
        // ssub.sat(C, x) with negative C produces [SINT_MIN, C - SINT_MIN].
        Lower = APInt::getSignedMinValue(Width);
        Upper = *C - APInt::getSignedMinValue(Width) + 1;
      } else {
        // ssub.sat(C, x) with non-negative C produces
        // [C - SINT_MAX, SINT_MAX].
        Lower = *C - APInt::getSignedMaxValue(Width);
        Upper = APInt::getSignedMaxValue(Width) + 1;
      }
    } else if (match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative()) {
        // ssub.sat(x, C) with negative C produces [SINT_MIN - C, SINT_MAX].
        Lower = APInt::getSignedMinValue(Width) - *C;
        Upper = APInt::getSignedMaxValue(Width) + 1;
      } else {
        // ssub.sat(x, C) with non-negative C produces
        // [SINT_MIN, SINT_MAX - C].
        Lower = APInt::getSignedMinValue(Width);
        Upper = APInt::getSignedMaxValue(Width) - *C + 1;
      }
    }
    break;
  default:
    break;
  }
}

static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
                                      APInt &Upper, const InstrInfoQuery &IIQ) {
  const Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
  if (R.Flavor == SPF_UNKNOWN)
    return;

  unsigned BitWidth = SI.getType()->getScalarSizeInBits();

  if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
    // If the negation part of the abs (in RHS) has the NSW flag,
    // then the result of abs(X) is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
    Lower = APInt::getNullValue(BitWidth);
    if (match(RHS, m_Neg(m_Specific(LHS))) &&
        IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
      Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    else
      Upper = APInt::getSignedMinValue(BitWidth) + 1;
    return;
  }

  if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
    // The result of -abs(X) is <= 0.
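    // SIGNED_MIN itself is included, since the negation of SIGNED_MIN wraps
    // back to itself: -abs(SIGNED_MIN) == SIGNED_MIN.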
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = APInt(BitWidth, 1);
    return;
  }

  const APInt *C;
  if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
    return;

  switch (R.Flavor) {
  case SPF_UMIN:
    Upper = *C + 1;
    break;
  case SPF_UMAX:
    Lower = *C;
    break;
  case SPF_SMIN:
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = *C + 1;
    break;
  case SPF_SMAX:
    Lower = *C;
    Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    break;
  default:
    break;
  }
}

ConstantRange llvm::computeConstantRange(const Value *V, bool UseInstrInfo,
                                         AssumptionCache *AC,
                                         const Instruction *CtxI,
                                         unsigned Depth) {
  assert(V->getType()->isIntOrIntVectorTy() && "Expected integer value");

  if (Depth == MaxDepth)
    return ConstantRange::getFull(V->getType()->getScalarSizeInBits());

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantRange(*C);

  InstrInfoQuery IIQ(UseInstrInfo);
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  APInt Lower = APInt(BitWidth, 0);
  APInt Upper = APInt(BitWidth, 0);
  if (auto *BO = dyn_cast<BinaryOperator>(V))
    setLimitsForBinOp(*BO, Lower, Upper, IIQ);
  else if (auto *II = dyn_cast<IntrinsicInst>(V))
    setLimitsForIntrinsic(*II, Lower, Upper);
  else if (auto *SI = dyn_cast<SelectInst>(V))
    setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);

  ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);

  if (auto *I = dyn_cast<Instruction>(V))
    if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
      CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));

  if (CtxI && AC) {
    // Try to restrict the range based on information from assumptions.
    for (auto &AssumeVH : AC->assumptionsFor(V)) {
      if (!AssumeVH)
        continue;
      CallInst *I = cast<CallInst>(AssumeVH);
      assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
             "Got assumption for the wrong function!");
      assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
             "must be an assume intrinsic");

      if (!isValidAssumeForContext(I, CtxI, nullptr))
        continue;
      Value *Arg = I->getArgOperand(0);
      ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
      // Currently we just use information from comparisons.
      if (!Cmp || Cmp->getOperand(0) != V)
        continue;
      ConstantRange RHS = computeConstantRange(Cmp->getOperand(1), UseInstrInfo,
                                               AC, I, Depth + 1);
      CR = CR.intersectWith(
          ConstantRange::makeSatisfyingICmpRegion(Cmp->getPredicate(), RHS));
    }
  }

  return CR;
}

static Optional<int64_t>
getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return None;
    if (OpC->isZero())
      continue; // No offset.

    // Handle struct indices, which add their field offset to the pointer.
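    // For example, with %T = type { i32, i64 }, an index of 1 adds the i64
    // field's offset as recorded in the DataLayout's struct layout
    // (commonly 8 bytes with natural alignment).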
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or fixed-length
    // vector. Multiply the index by the ElementSize.
    TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
    if (Size.isScalable())
      return None;
    Offset += Size.getFixedSize() * OpC->getSExtValue();
  }

  return Offset;
}

Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
                                        const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2)
    return 0;

  const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  // If one pointer is a GEP see if the GEP is a constant offset from the base,
  // as in "P" and "gep P, 1".
  // Also do this iteratively to handle the following case:
  //   Ptr_t1 = GEP Ptr1, c1
  //   Ptr_t2 = GEP Ptr_t1, c2
  //   Ptr2 = GEP Ptr_t2, c3
  // where we will return c1+c2+c3.
  // TODO: Handle the case when both Ptr1 and Ptr2 are GEPs of some common base
  // -- replace getOffsetFromBase with getOffsetAndBase, check that the bases
  // are the same, and return the difference between offsets.
  auto getOffsetFromBase = [&DL](const GEPOperator *GEP,
                                 const Value *Ptr) -> Optional<int64_t> {
    const GEPOperator *GEP_T = GEP;
    int64_t OffsetVal = 0;
    bool HasSameBase = false;
    while (GEP_T) {
      auto Offset = getOffsetFromIndex(GEP_T, 1, DL);
      if (!Offset)
        return None;
      OffsetVal += *Offset;
      auto Op0 = GEP_T->getOperand(0)->stripPointerCasts();
      if (Op0 == Ptr) {
        HasSameBase = true;
        break;
      }
      GEP_T = dyn_cast<GEPOperator>(Op0);
    }
    if (!HasSameBase)
      return None;
    return OffsetVal;
  };

  if (GEP1) {
    auto Offset = getOffsetFromBase(GEP1, Ptr2);
    if (Offset)
      return -*Offset;
  }
  if (GEP2) {
    auto Offset = getOffsetFromBase(GEP2, Ptr1);
    if (Offset)
      return Offset;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base. After that base, they may have some number of common (and
  // potentially variable) indices. After that, they may have some constant
  // offset, which determines their offset from each other. At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return None;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  auto Offset1 = getOffsetFromIndex(GEP1, Idx, DL);
  auto Offset2 = getOffsetFromIndex(GEP2, Idx, DL);
  if (!Offset1 || !Offset2)
    return None;
  return *Offset2 - *Offset1;
}
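
// Example (illustrative only, not part of the build): given IR such as
//   %p1 = getelementptr inbounds i8, i8* %base, i64 4
//   %p2 = getelementptr inbounds i8, i8* %base, i64 12
// a client holding the module's DataLayout could query the byte distance:
//   if (Optional<int64_t> Diff = isPointerOffset(P1, P2, DL))
//     ...;  // *Diff == 8 here (offset of %p2 relative to %p1)
// where P1/P2 are hypothetical Value pointers for %p1/%p2 above.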