//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include <cstring>
using namespace llvm;

/// ComputeMaskedBits - Determine which of the bits specified in Mask are
/// known to be either zero or one and return them in the KnownZero/KnownOne
/// bit sets. This code only analyzes bits in Mask, in order to short-circuit
/// processing.
/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero. If we don't change it to zero, other code could
/// optimize based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
                             APInt &KnownZero, APInt &KnownOne,
                             TargetData *TD, unsigned Depth) {
  const unsigned MaxDepth = 6;
  assert(V && "No Value?");
  assert(Depth <= MaxDepth && "Limit Search Depth");
  unsigned BitWidth = Mask.getBitWidth();
  assert((V->getType()->isIntOrIntVector() || isa<PointerType>(V->getType())) &&
         "Not integer or pointer type!");
  assert((!TD ||
          TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
         (!V->getType()->isIntOrIntVector() ||
          V->getType()->getScalarSizeInBits() == BitWidth) &&
         KnownZero.getBitWidth() == BitWidth &&
         KnownOne.getBitWidth() == BitWidth &&
         "V, Mask, KnownOne and KnownZero should have same BitWidth");

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // We know all of the bits for a constant!
    KnownOne = CI->getValue() & Mask;
    KnownZero = ~KnownOne & Mask;
    return;
  }
  // Null and aggregate-zero are all-zeros.
  if (isa<ConstantPointerNull>(V) ||
      isa<ConstantAggregateZero>(V)) {
    KnownOne.clear();
    KnownZero = Mask;
    return;
  }
  // Handle a constant vector by taking the intersection of the known bits of
  // each element.
  if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
    KnownZero.set(); KnownOne.set();
    for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
      APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
      ComputeMaskedBits(CV->getOperand(i), Mask, KnownZero2, KnownOne2,
                        TD, Depth);
      KnownZero &= KnownZero2;
      KnownOne &= KnownOne2;
    }
    return;
  }
  // The address of an aligned GlobalValue has trailing zeros.
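  // (For example, a global known to be 16-byte aligned has its low four
  // address bits known zero.)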
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    unsigned Align = GV->getAlignment();
    if (Align == 0 && TD && GV->getType()->getElementType()->isSized())
      Align = TD->getPrefTypeAlignment(GV->getType()->getElementType());
    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    else
      KnownZero.clear();
    KnownOne.clear();
    return;
  }

  KnownZero.clear(); KnownOne.clear();   // Start out not knowing anything.

  if (Depth == MaxDepth || Mask == 0)
    return;  // Limit search depth.

  Operator *I = dyn_cast<Operator>(V);
  if (!I) return;

  APInt KnownZero2(KnownZero), KnownOne2(KnownOne);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    APInt Mask2(Mask & ~KnownZero);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne &= KnownOne2;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero |= KnownZero2;
    return;
  }
  case Instruction::Or: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    APInt Mask2(Mask & ~KnownOne);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero &= KnownZero2;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne |= KnownOne2;
    return;
  }
  case Instruction::Xor: {
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
    KnownZero = KnownZeroOut;
    return;
  }
  case Instruction::Mul: {
    APInt Mask2 = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero, KnownOne, TD,
                      Depth+1);
    ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // If low bits are zero in either operand, output low known-0 bits.
    // Also compute a conservative estimate for high known-0 bits.
    // More trickiness is possible, but this is sufficient for the
    // interesting case of alignment computation.
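    // (For example, if one operand has two known trailing zero bits and the
    // other has three, the product has at least five trailing zero bits.)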
    KnownOne.clear();
    unsigned TrailZ = KnownZero.countTrailingOnes() +
                      KnownZero2.countTrailingOnes();
    unsigned LeadZ =  std::max(KnownZero.countLeadingOnes() +
                               KnownZero2.countLeadingOnes(),
                               BitWidth) - BitWidth;

    TrailZ = std::min(TrailZ, BitWidth);
    LeadZ = std::min(LeadZ, BitWidth);
    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
                APInt::getHighBitsSet(BitWidth, LeadZ);
    KnownZero &= Mask;
    return;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0),
                      AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
    unsigned LeadZ = KnownZero2.countLeadingOnes();

    KnownOne2.clear();
    KnownZero2.clear();
    ComputeMaskedBits(I->getOperand(1),
                      AllOnes, KnownZero2, KnownOne2, TD, Depth+1);
    unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
    if (RHSUnknownLeadingOnes != BitWidth)
      LeadZ = std::min(BitWidth,
                       LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);

    KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ) & Mask;
    return;
  }
  case Instruction::Select:
    ComputeMaskedBits(I->getOperand(2), Mask, KnownZero, KnownOne, TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), Mask, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");

    // Only known if known in both the LHS and RHS.
    KnownOne &= KnownOne2;
    KnownZero &= KnownZero2;
    return;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    return; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // We can't handle these if we don't know the pointer size.
    if (!TD) return;
    // FALL THROUGH and handle them the same as zext/trunc.
  case Instruction::ZExt:
  case Instruction::Trunc: {
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    const Type *SrcTy = I->getOperand(0)->getType();
    unsigned SrcBitWidth = TD ?
      TD->getTypeSizeInBits(SrcTy) :
      SrcTy->getScalarSizeInBits();
    APInt MaskIn(Mask);
    MaskIn.zextOrTrunc(SrcBitWidth);
    KnownZero.zextOrTrunc(SrcBitWidth);
    KnownOne.zextOrTrunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    KnownZero.zextOrTrunc(BitWidth);
    KnownOne.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::BitCast: {
    const Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !isa<VectorType>(I->getType())) {
      ComputeMaskedBits(I->getOperand(0), Mask, KnownZero, KnownOne, TD,
                        Depth+1);
      return;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
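    // (For example, for sext i8 %x to i32: if bit 7 of %x is known zero,
    // bits 8..31 of the result become known zero; if it is known one, they
    // become known one.)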
    const IntegerType *SrcTy = cast<IntegerType>(I->getOperand(0)->getType());
    unsigned SrcBitWidth = SrcTy->getBitWidth();

    APInt MaskIn(Mask);
    MaskIn.trunc(SrcBitWidth);
    KnownZero.trunc(SrcBitWidth);
    KnownOne.trunc(SrcBitWidth);
    ComputeMaskedBits(I->getOperand(0), MaskIn, KnownZero, KnownOne, TD,
                      Depth+1);
    assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
    KnownZero.zext(BitWidth);
    KnownOne.zext(BitWidth);

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    if (KnownZero[SrcBitWidth-1])             // Input sign bit known zero
      KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    else if (KnownOne[SrcBitWidth-1])         // Input sign bit known set
      KnownOne |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
    return;
  }
  case Instruction::Shl:
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
      APInt Mask2(Mask.lshr(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt); // low bits known 0
      return;
    }
    break;
  case Instruction::LShr:
    // (ushr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Unsigned shift right.
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);
      // high bits known zero.
      KnownZero |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
      return;
    }
    break;
  case Instruction::AShr:
    // (ashr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // Compute the new bits that are at the top now.
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);

      // Signed shift right.
      APInt Mask2(Mask.shl(ShiftAmt));
      ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                        Depth+1);
      assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      KnownZero = APIntOps::lshr(KnownZero, ShiftAmt);
      KnownOne  = APIntOps::lshr(KnownOne, ShiftAmt);

      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      if (KnownZero[BitWidth-ShiftAmt-1])      // New bits are known zero.
        KnownZero |= HighBits;
      else if (KnownOne[BitWidth-ShiftAmt-1])  // New bits are known one.
        KnownOne |= HighBits;
      return;
    }
    break;
  case Instruction::Sub: {
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(I->getOperand(0))) {
      // We know that the top bits of C-X are clear if X contains fewer bits
      // than C (i.e. no wrap-around can happen). For example, 20-X is
      // positive if we can prove that X is >= 0 and < 16.
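      // (Concretely, 20-X with 0 <= X < 16 lies in [5, 20], so every bit
      // above bit 4 of the result is known zero.)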
      if (!CLHS->getValue().isNegative()) {
        unsigned NLZ = (CLHS->getValue()+1).countLeadingZeros();
        // NLZ can't be BitWidth with no sign bit
        APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
        ComputeMaskedBits(I->getOperand(1), MaskV, KnownZero2, KnownOne2,
                          TD, Depth+1);

        // If all of the MaskV bits are known to be zero, then we know the
        // output top bits are zero, because we now know that the output is
        // from [0-C].
        if ((KnownZero2 & MaskV) == MaskV) {
          unsigned NLZ2 = CLHS->getValue().countLeadingZeros();
          // Top bits known zero.
          KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2) & Mask;
        }
      }
    }
  }
  // fall through
  case Instruction::Add: {
    // If one of the operands has trailing zeros, then the bits that the
    // other operand has in those bit positions will be preserved in the
    // result. For an add, this works with either operand. For a subtract,
    // this only works if the known zeros are in the right operand.
    APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
    APInt Mask2 = APInt::getLowBitsSet(BitWidth,
                                       BitWidth - Mask.countLeadingZeros());
    ComputeMaskedBits(I->getOperand(0), Mask2, LHSKnownZero, LHSKnownOne, TD,
                      Depth+1);
    assert((LHSKnownZero & LHSKnownOne) == 0 &&
           "Bits known to be one AND zero?");
    unsigned LHSKnownZeroOut = LHSKnownZero.countTrailingOnes();

    ComputeMaskedBits(I->getOperand(1), Mask2, KnownZero2, KnownOne2, TD,
                      Depth+1);
    assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
    unsigned RHSKnownZeroOut = KnownZero2.countTrailingOnes();

    // Determine which operand has more trailing zeros, and use that
    // many bits from the other operand.
    if (LHSKnownZeroOut > RHSKnownZeroOut) {
      if (I->getOpcode() == Instruction::Add) {
        APInt Mask = APInt::getLowBitsSet(BitWidth, LHSKnownZeroOut);
        KnownZero |= KnownZero2 & Mask;
        KnownOne  |= KnownOne2 & Mask;
      } else {
        // If the known zeros are in the left operand for a subtract,
        // fall back to the minimum known zeros in both operands.
        KnownZero |= APInt::getLowBitsSet(BitWidth,
                                          std::min(LHSKnownZeroOut,
                                                   RHSKnownZeroOut));
      }
    } else if (RHSKnownZeroOut >= LHSKnownZeroOut) {
      APInt Mask = APInt::getLowBitsSet(BitWidth, RHSKnownZeroOut);
      KnownZero |= LHSKnownZero & Mask;
      KnownOne  |= LHSKnownOne & Mask;
    }
    return;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2() || (-RA).isPowerOf2()) {
        APInt LowBits = RA.isStrictlyPositive() ? (RA - 1) : ~RA;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero2, KnownOne2, TD,
                          Depth+1);

        // If the sign bit of the first operand is zero, the sign bit of
        // the result is zero. If the first operand has no one bits below
        // the second operand's single 1 bit, its sign will be zero.
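        // (For example, X srem 16 with X known non-negative yields a result
        // in [0, 15], so all bits above bit 3 are known zero.)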
        if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
          KnownZero2 |= ~LowBits;

        KnownZero |= KnownZero2 & Mask;

        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
      }
    }
    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        APInt Mask2 = LowBits & Mask;
        KnownZero |= ~LowBits & Mask;
        ComputeMaskedBits(I->getOperand(0), Mask2, KnownZero, KnownOne, TD,
                          Depth+1);
        assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    ComputeMaskedBits(I->getOperand(0), AllOnes, KnownZero, KnownOne,
                      TD, Depth+1);
    ComputeMaskedBits(I->getOperand(1), AllOnes, KnownZero2, KnownOne2,
                      TD, Depth+1);

    unsigned Leaders = std::max(KnownZero.countLeadingOnes(),
                                KnownZero2.countLeadingOnes());
    KnownOne.clear();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & Mask;
    break;
  }

  case Instruction::Alloca:
  case Instruction::Malloc: {
    AllocationInst *AI = cast<AllocationInst>(V);
    unsigned Align = AI->getAlignment();
    if (Align == 0 && TD) {
      if (isa<AllocaInst>(AI))
        Align = TD->getABITypeAlignment(AI->getType()->getElementType());
      else if (isa<MallocInst>(AI)) {
        // Malloc returns maximally aligned memory.
        Align = TD->getABITypeAlignment(AI->getType()->getElementType());
        Align =
          std::max(Align,
                   (unsigned)TD->getABITypeAlignment(Type::DoubleTy));
        Align =
          std::max(Align,
                   (unsigned)TD->getABITypeAlignment(Type::Int64Ty));
      }
    }

    if (Align > 0)
      KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
                                              CountTrailingZeros_32(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    APInt LocalMask = APInt::getAllOnesValue(BitWidth);
    APInt LocalKnownZero(BitWidth, 0), LocalKnownOne(BitWidth, 0);
    ComputeMaskedBits(I->getOperand(0), LocalMask,
                      LocalKnownZero, LocalKnownOne, TD, Depth+1);
    unsigned TrailZ = LocalKnownZero.countTrailingOnes();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        // Handle struct member offset arithmetic.
        if (!TD) return;
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min(TrailZ,
                          CountTrailingZeros_64(Offset));
      } else {
        // Handle array index arithmetic.
        const Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) return;
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
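        // (For example, indexing an array of i64 gives TypeSize == 8, so the
        // step's byte offset has at least three trailing zero bits, plus any
        // known trailing zeros of the index value itself.)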
        LocalMask = APInt::getAllOnesValue(GEPOpiBits);
        LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
        ComputeMaskedBits(Index, LocalMask,
                          LocalKnownZero, LocalKnownOne, TD, Depth+1);
        TrailZ = std::min(TrailZ,
                          unsigned(CountTrailingZeros_64(TypeSize) +
                                   LocalKnownZero.countTrailingOnes()));
      }
    }

    KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) & Mask;
    break;
  }
  case Instruction::PHI: {
    PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          APInt Mask2 = APInt::getAllOnesValue(BitWidth);
          ComputeMaskedBits(R, Mask2, KnownZero2, KnownOne2, TD, Depth+1);
          Mask2 = APInt::getLowBitsSet(BitWidth,
                                       KnownZero2.countTrailingOnes());

          // We need to take the minimum number of known bits.
          APInt KnownZero3(KnownZero), KnownOne3(KnownOne);
          ComputeMaskedBits(L, Mask2, KnownZero3, KnownOne3, TD, Depth+1);

          KnownZero = Mask &
            APInt::getLowBitsSet(BitWidth,
                                 std::min(KnownZero2.countTrailingOnes(),
                                          KnownZero3.countTrailingOnes()));
          break;
        }
      }
    }

    // Otherwise take the intersection of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !KnownZero && !KnownOne) {
      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getAllOnesValue(BitWidth);
      for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) {
        // Skip direct self references.
        if (P->getIncomingValue(i) == P) continue;

        KnownZero2 = APInt(BitWidth, 0);
        KnownOne2 = APInt(BitWidth, 0);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        ComputeMaskedBits(P->getIncomingValue(i), KnownZero | KnownOne,
                          KnownZero2, KnownOne2, TD, MaxDepth-1);
        KnownZero &= KnownZero2;
        KnownOne &= KnownOne2;
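        // Note: passing KnownZero | KnownOne as the mask above restricts the
        // recursive query to bits that are still known; once a bit has been
        // ruled out, the intersection can never make it known again.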
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!KnownZero && !KnownOne)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::ctpop:
      case Intrinsic::ctlz:
      case Intrinsic::cttz: {
        unsigned LowBits = Log2_32(BitWidth)+1;
        KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
        break;
      }
      }
    }
    break;
  }
}

/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
                             TargetData *TD, unsigned Depth) {
  APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
  assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
  return (KnownZero & Mask) == Mask;
}
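
// For example, if %x is the result of 'and i32 %y, 255', then
// MaskedValueIsZero(%x, 0xFFFFFF00) returns true: the 'and' clears the top
// 24 bits.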

/// ComputeNumSignBits - Return the number of times the sign bit of the
/// register is replicated into the other bits. We know that at least 1 bit
/// is always equal to the sign bit (itself), but other cases can give us
/// information. For example, immediately after an "ashr X, 2", we know that
/// the top 3 bits are all equal to each other, so we return 3.
///
/// 'V' must have a scalar integer type.
///
unsigned llvm::ComputeNumSignBits(Value *V, TargetData *TD, unsigned Depth) {
  assert((TD || V->getType()->isIntOrIntVector()) &&
         "ComputeNumSignBits requires a TargetData object to operate "
         "on non-integer values!");
  const Type *Ty = V->getType();
  unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
                         Ty->getScalarSizeInBits();
  unsigned Tmp, Tmp2;
  unsigned FirstAnswer = 1;

  // Note that ConstantInt is handled by the general ComputeMaskedBits case
  // below.

  if (Depth == 6)
    return 1;  // Limit search depth.

  Operator *U = dyn_cast<Operator>(V);
  switch (Operator::getOpcode(V)) {
  default: break;
  case Instruction::SExt:
    Tmp = TyBits-cast<IntegerType>(U->getOperand(0)->getType())->getBitWidth();
    return ComputeNumSignBits(U->getOperand(0), TD, Depth+1) + Tmp;

  case Instruction::AShr:
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    // ashr X, C  ->  adds C sign bits.
    if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
      Tmp += C->getZExtValue();
      if (Tmp > TyBits) Tmp = TyBits;
    }
    return Tmp;
  case Instruction::Shl:
    if (ConstantInt *C = dyn_cast<ConstantInt>(U->getOperand(1))) {
      // shl destroys sign bits.
      Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
      if (C->getZExtValue() >= TyBits ||    // Bad shift.
          C->getZExtValue() >= Tmp) break;  // Shifted all sign bits out.
      return Tmp - C->getZExtValue();
    }
    break;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:  // NOT is handled here.
    // Logical binary ops preserve the number of sign bits at the worst.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp != 1) {
      Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
      FirstAnswer = std::min(Tmp, Tmp2);
      // We computed what we know about the sign bits as our first
      // answer. Now proceed to the generic code that uses
      // ComputeMaskedBits, and pick whichever answer is better.
    }
    break;

  case Instruction::Select:
    Tmp = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    Tmp2 = ComputeNumSignBits(U->getOperand(2), TD, Depth+1);
    return std::min(Tmp, Tmp2);

  case Instruction::Add:
    // Add can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.

    // Special case decrementing a value (ADD X, -1):
    if (ConstantInt *CRHS = dyn_cast<ConstantInt>(U->getOperand(1)))
      if (CRHS->isAllOnesValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        APInt Mask = APInt::getAllOnesValue(TyBits);
        ComputeMaskedBits(U->getOperand(0), Mask, KnownZero, KnownOne, TD,
                          Depth+1);

        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)) == Mask)
          return TyBits;

        // If we are subtracting one from a positive number, there is no carry
        // out of the result.
        if (KnownZero.isNegative())
          return Tmp;
      }

    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Sub:
    Tmp2 = ComputeNumSignBits(U->getOperand(1), TD, Depth+1);
    if (Tmp2 == 1) return 1;

    // Handle NEG.
    if (ConstantInt *CLHS = dyn_cast<ConstantInt>(U->getOperand(0)))
      if (CLHS->isNullValue()) {
        APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
        APInt Mask = APInt::getAllOnesValue(TyBits);
        ComputeMaskedBits(U->getOperand(1), Mask, KnownZero, KnownOne,
                          TD, Depth+1);
        // If the input is known to be 0 or 1, the output is 0/-1, which is all
        // sign bits set.
        if ((KnownZero | APInt(TyBits, 1)) == Mask)
          return TyBits;

        // If the input is known to be positive (the sign bit is known clear),
        // the output of the NEG has the same number of sign bits as the input.
        if (KnownZero.isNegative())
          return Tmp2;

        // Otherwise, we treat this like a SUB.
      }

    // Sub can have at most one carry bit. Thus we know that the output
    // is, at worst, one more bit than the inputs.
    Tmp = ComputeNumSignBits(U->getOperand(0), TD, Depth+1);
    if (Tmp == 1) return 1;  // Early out.
    return std::min(Tmp, Tmp2)-1;

  case Instruction::Trunc:
    // FIXME: it's tricky to do anything useful for this, but it is an
    // important case for targets like X86.
    break;
  }

  // Finally, if we can prove that the top bits of the result are 0's or 1's,
  // use this information.
  APInt KnownZero(TyBits, 0), KnownOne(TyBits, 0);
  APInt Mask = APInt::getAllOnesValue(TyBits);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);

  if (KnownZero.isNegative()) {        // sign bit is 0
    Mask = KnownZero;
  } else if (KnownOne.isNegative()) {  // sign bit is 1
    Mask = KnownOne;
  } else {
    // Nothing known.
    return FirstAnswer;
  }

  // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
  // the number of identical bits in the top of the input value.
  Mask = ~Mask;
  Mask <<= Mask.getBitWidth()-TyBits;
  // Return # leading zeros. We use 'min' here in case Val was zero before
  // shifting. We don't want to return '64' as for an i32 "0".
  return std::max(FirstAnswer, std::min(TyBits, Mask.countLeadingZeros()));
}
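
// For example, after %s = ashr i32 %x, 24, ComputeNumSignBits(%s) is at
// least 25: the shift replicates the original sign bit into the top 24
// bits of the result.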

/// CannotBeNegativeZero - Return true if we can prove that the specified FP
/// value is never equal to -0.0.
///
/// NOTE: this function will need to be revisited when we support non-default
/// rounding modes!
///
bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
    return !CFP->getValueAPF().isNegZero();

  if (Depth == 6)
    return true;  // Limit search depth.

  const Operator *I = dyn_cast<Operator>(V);
  if (I == 0) return false;

  // (add x, 0.0) is guaranteed to return +0.0, not -0.0.
  if (I->getOpcode() == Instruction::FAdd &&
      isa<ConstantFP>(I->getOperand(1)) &&
      cast<ConstantFP>(I->getOperand(1))->isNullValue())
    return true;

  // sitofp and uitofp turn into +0.0 for zero.
  if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
    return true;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    // sqrt(-0.0) = -0.0, no other negative results are possible.
    if (II->getIntrinsicID() == Intrinsic::sqrt)
      return CannotBeNegativeZero(II->getOperand(1), Depth+1);

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction()) {
      if (F->isDeclaration()) {
        switch (F->getNameLen()) {
        case 3:  // abs(x) != -0.0
          if (!strcmp(F->getNameStart(), "abs")) return true;
          break;
        case 4:  // abs[lf](x) != -0.0
          if (!strcmp(F->getNameStart(), "absf")) return true;
          if (!strcmp(F->getNameStart(), "absl")) return true;
          break;
        }
      }
    }

  return false;
}
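
// For example, CannotBeNegativeZero returns true for '%r = fadd double %x,
// 0.0': with the default rounding mode, +0.0 + -0.0 is +0.0, so %r cannot
// be -0.0 regardless of %x.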

// This is the recursive version of BuildSubAggregate. It takes a few
// different arguments. Idxs is the index within the nested struct From that
// we are looking at now (which is of type IndexedType). IdxSkip is the number
// of indices from Idxs that should be left out when inserting into the
// resulting struct. To is the result struct built so far, new insertvalue
// instructions build on that.
Value *BuildSubAggregate(Value *From, Value *To, const Type *IndexedType,
                         SmallVector<unsigned, 10> &Idxs,
                         unsigned IdxSkip,
                         LLVMContext &Context,
                         Instruction *InsertBefore) {
  const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
  if (STy) {
    // Save the original To argument so we can modify it
    Value *OrigTo = To;
    // General case, the type indexed by Idxs is a struct
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      // Process each struct element recursively
      Idxs.push_back(i);
      Value *PrevTo = To;
      To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
                             Context, InsertBefore);
      Idxs.pop_back();
      if (!To) {
        // Couldn't find any inserted value for this index? Cleanup
        while (PrevTo != OrigTo) {
          InsertValueInst *Del = cast<InsertValueInst>(PrevTo);
          PrevTo = Del->getAggregateOperand();
          Del->eraseFromParent();
        }
        // Stop processing elements
        break;
      }
    }
    // If we successfully found a value for each of our subaggregates
    if (To)
      return To;
  }
  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
  // the struct's elements had a value that was inserted directly. In the
  // latter case, perhaps we can't determine each of the subelements
  // individually, but we might be able to find the complete struct somewhere.

  // Find the value that is at that particular spot
  Value *V = FindInsertedValue(From, Idxs.begin(), Idxs.end(), Context);

  if (!V)
    return NULL;

  // Insert the value in the new (sub) aggregate
  return llvm::InsertValueInst::Create(To, V, Idxs.begin() + IdxSkip,
                                       Idxs.end(), "tmp", InsertBefore);
}
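
// For example, for From of type { a, { b, { c, d }, e } } and an initial
// Idxs of {1, 1}: the recursion visits Idxs {1, 1, 0} and {1, 1, 1}, and
// IdxSkip == 2 drops the leading {1, 1} so they become indices {0} and {1}
// in the new { c, d } value being built.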

// This helper takes a nested struct and extracts a part of it (which is
// again a struct) into a new value. For example, given the struct:
//   { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
//   { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work
// if each of the elements of the substruct is known (i.e., inserted into
// From by an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore.
Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
                         const unsigned *idx_end, LLVMContext &Context,
                         Instruction *InsertBefore) {
  assert(InsertBefore && "Must have someplace to insert!");
  const Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
                                                             idx_begin,
                                                             idx_end);
  Value *To = Context.getUndef(IndexedType);
  SmallVector<unsigned, 10> Idxs(idx_begin, idx_end);
  unsigned IdxSkip = Idxs.size();

  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip,
                           Context, InsertBefore);
}

/// FindInsertedValue - Given an aggregate and a sequence of indices, see if
/// the scalar value indexed is already around as a register, for example if
/// it were inserted directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
                               const unsigned *idx_end, LLVMContext &Context,
                               Instruction *InsertBefore) {
  // Nothing to index? Just return V then (this is useful at the end of our
  // recursion).
  if (idx_begin == idx_end)
    return V;
  // We have indices, so V should have an indexable type.
  assert((isa<StructType>(V->getType()) || isa<ArrayType>(V->getType()))
         && "Not looking at a struct or array?");
  assert(ExtractValueInst::getIndexedType(V->getType(), idx_begin, idx_end)
         && "Invalid indices for type?");
  const CompositeType *PTy = cast<CompositeType>(V->getType());

  if (isa<UndefValue>(V))
    return Context.getUndef(ExtractValueInst::getIndexedType(PTy,
                                                             idx_begin,
                                                             idx_end));
  else if (isa<ConstantAggregateZero>(V))
    return Context.getNullValue(ExtractValueInst::getIndexedType(PTy,
                                                                 idx_begin,
                                                                 idx_end));
  else if (Constant *C = dyn_cast<Constant>(V)) {
    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C))
      // Recursively process this constant
      return FindInsertedValue(C->getOperand(*idx_begin), idx_begin + 1,
                               idx_end, Context, InsertBefore);
  } else if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
    // Loop over the indices of the insertvalue instruction in parallel with
    // the requested indices.
    const unsigned *req_idx = idx_begin;
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i, ++req_idx) {
      if (req_idx == idx_end) {
        if (InsertBefore)
          // The requested index identifies a part of a nested aggregate.
          // Handle this specially. For example,
          // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
          // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
          // %C = extractvalue {i32, { i32, i32 } } %B, 1
          // This can be changed into
          // %A = insertvalue {i32, i32 } undef, i32 10, 0
          // %C = insertvalue {i32, i32 } %A, i32 11, 1
          // which allows the unused 0,0 element from the nested struct to be
          // removed.
          return BuildSubAggregate(V, idx_begin, req_idx,
                                   Context, InsertBefore);
        else
          // We can't handle this without inserting insertvalues
          return 0;
      }

      // This insertvalue inserts something other than what we are looking
      // for. See if the (aggregate) value inserted into has the value we are
      // looking for, then.
      if (*req_idx != *i)
        return FindInsertedValue(I->getAggregateOperand(), idx_begin, idx_end,
                                 Context, InsertBefore);
    }
    // If we end up here, the indices of the insertvalue match with those
    // requested (though possibly only partially). Now we recursively look at
    // the inserted value, passing any remaining indices.
    return FindInsertedValue(I->getInsertedValueOperand(), req_idx, idx_end,
                             Context, InsertBefore);
  } else if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
    // If we're extracting a value from an aggregate that was extracted from
    // something else, we can extract from that something else directly
    // instead. However, we will need to chain I's indices with the requested
    // indices.
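    // (For example, if I is 'extractvalue %agg, 0' and the caller asked for
    // index 1, we look up indices {0, 1} on %agg's source directly.)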

    // Calculate the number of indices required
    unsigned size = I->getNumIndices() + (idx_end - idx_begin);
    // Allocate some space to put the new indices in
    SmallVector<unsigned, 5> Idxs;
    Idxs.reserve(size);
    // Add indices from the extractvalue instruction
    for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
         i != e; ++i)
      Idxs.push_back(*i);

    // Add requested indices
    for (const unsigned *i = idx_begin, *e = idx_end; i != e; ++i)
      Idxs.push_back(*i);

    assert(Idxs.size() == size
           && "Number of indices added not correct?");

    return FindInsertedValue(I->getAggregateOperand(), Idxs.begin(),
                             Idxs.end(), Context, InsertBefore);
  }
  // Otherwise, we don't know (such as, extracting from a function return
  // value or load instruction)
  return 0;
}

/// GetConstantStringInfo - This function extracts the contents of the
/// constant, null-terminated C string pointed to by V. If successful, it
/// returns true and stores the string in Str. If unsuccessful, it returns
/// false.
bool llvm::GetConstantStringInfo(Value *V, std::string &Str, uint64_t Offset,
                                 bool StopAtNul) {
  // If V is NULL then return false.
  if (V == NULL) return false;

  // Look through bitcast instructions.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(V))
    return GetConstantStringInfo(BCI->getOperand(0), Str, Offset, StopAtNul);

  // If the value is not a GEP instruction nor a constant expression with a
  // GEP instruction, then return false because ConstantArray can't occur
  // any other way.
  User *GEP = 0;
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(V)) {
    GEP = GEPI;
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    if (CE->getOpcode() == Instruction::BitCast)
      return GetConstantStringInfo(CE->getOperand(0), Str, Offset, StopAtNul);
    if (CE->getOpcode() != Instruction::GetElementPtr)
      return false;
    GEP = CE;
  }

  if (GEP) {
    // Make sure the GEP has exactly three arguments.
    if (GEP->getNumOperands() != 3)
      return false;

    // Make sure the index-ee is a pointer to array of i8.
    const PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
    const ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
    if (AT == 0 || AT->getElementType() != Type::Int8Ty)
      return false;

    // Check to make sure that the first operand of the GEP is an integer and
    // has value 0 so that we are sure we're indexing into the initializer.
    ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
    if (FirstIdx == 0 || !FirstIdx->isZero())
      return false;

    // If the second index isn't a ConstantInt, then this is a variable index
    // into the array. If this occurs, we can't say anything meaningful about
    // the string.
    uint64_t StartIdx = 0;
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
      StartIdx = CI->getZExtValue();
    else
      return false;
    return GetConstantStringInfo(GEP->getOperand(0), Str, StartIdx+Offset,
                                 StopAtNul);
  }

  // The GEP instruction, constant or instruction, must reference a global
  // variable that is a constant and is initialized. The referenced constant
  // initializer is the array that we'll use for optimization.
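  // (Illustration: given a hypothetical global @msg = constant [4 x i8]
  // c"hi\00\00" and V a getelementptr to element 0 of @msg, Str becomes "hi"
  // and the function returns true, since StopAtNul stops at the first NUL.)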
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
  if (!GV || !GV->isConstant() || !GV->hasInitializer())
    return false;
  Constant *GlobalInit = GV->getInitializer();

  // Handle the ConstantAggregateZero case.
  if (isa<ConstantAggregateZero>(GlobalInit)) {
    // This is a degenerate case. The initializer is constant zero so the
    // length of the string must be zero.
    Str.clear();
    return true;
  }

  // Must be a ConstantArray of i8.
  ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
  if (Array == 0 || Array->getType()->getElementType() != Type::Int8Ty)
    return false;

  // Get the number of elements in the array.
  uint64_t NumElts = Array->getType()->getNumElements();

  if (Offset > NumElts)
    return false;

  // Traverse the constant array from 'Offset', which is the place the GEP
  // refers to in the array.
  Str.reserve(NumElts-Offset);
  for (unsigned i = Offset; i != NumElts; ++i) {
    Constant *Elt = Array->getOperand(i);
    ConstantInt *CI = dyn_cast<ConstantInt>(Elt);
    if (!CI)  // This array isn't suitable, non-int initializer.
      return false;
    if (StopAtNul && CI->isZero())
      return true;  // We found the end of the string, success!
    Str += (char)CI->getZExtValue();
  }

  // The array isn't null terminated, but maybe this is a memcpy, not a strcpy.
  return true;
}