//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include <algorithm>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Don't bother analyzing arguments already known not to escape.
      if (A->hasNoCaptureAttr())
        return true;
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
    }
  return false;
}

/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
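///
/// Illustrative note (not from the original source): for
///   %buf = alloca [10 x i8], align 16
/// the object size is 10 bytes, but with RoundToAlign=true it is reported as
/// 16, since isObjectSmallerThan below tolerates reads a little past the end
/// of a sufficiently aligned object.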
static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign))
    return Size;
  return AliasAnalysis::UnknownSize;
}

/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &TD,
                                const TargetLibraryInfo &TLI) {
  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);

  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}

/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
                         const DataLayout &TD, const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, TD, TLI);
  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
  enum ExtensionKind {
    EK_NotExtended,
    EK_SignExt,
    EK_ZeroExt
  };

  struct VariableGEPIndex {
    const Value *V;
    ExtensionKind Extension;
    int64_t Scale;

    bool operator==(const VariableGEPIndex &Other) const {
      return V == Other.V && Extension == Other.Extension &&
             Scale == Other.Scale;
    }

    bool operator!=(const VariableGEPIndex &Other) const {
      return !operator==(Other);
    }
  };
}


/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts and return V as a Value*, and return whether we looked
/// through any sign or zero extends. The incoming Value is known to have
/// IntegerType and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                  ExtensionKind &Extension,
                                  const DataLayout &TD, unsigned Depth) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default: break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &TD))
          break;
        // FALL THROUGH.
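      // Illustrative note (not from the original source): if the low two bits
      // of %x are known zero, then (or %x, 3) computes the same value as
      // (add %x, 3), so the Or is handled by falling through to the Add case
      // with Offset increased by 3 and the Scale unchanged.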
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
                                        TD, Depth+1);
    Scale = Scale.zext(OldWidth);
    Offset = Offset.zext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
/// the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. When not, it just looks
/// through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       const DataLayout *TD) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = 6;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (Op == 0) {
      // The only non-operator cases we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (GEPOp == 0) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and use it here.
        if (const Value *Simplified =
              SimplifyInstruction(const_cast<Instruction *>(I), TD)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
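    // (For example, a GEP whose pointee is an opaque struct type has no known
    // size, so no byte offset can be computed for its indices.)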
    if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
          ->getElementType()->isSized())
      return V;

    // If we are lacking DataLayout information, we can't compute the offsets
    // of elements computed by GEPs. However, we can handle bitcast equivalent
    // GEPs.
    if (TD == 0) {
      if (!GEPOp->hasAllZeroIndices())
        return V;
      V = GEPOp->getOperand(0);
      continue;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin()+1,
         E = GEPOp->op_end(); I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = TD->getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
      if (TD->getPointerSizeInBits(AS) > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
                                  *TD, 0);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();


      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index &&
            VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin()+i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64-TD->getPointerSizeInBits(AS)) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  return V;
}

/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
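///
/// Illustrative example (not from the original source): with Dest = {x*4, y*8}
/// and Src = {x*4}, the x entries cancel and Dest becomes {y*8}; with
/// Src = {x*12} instead, Dest would become {x*-8, y*8}.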
static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                               const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty()) return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (Dest[j].V != V || Dest[j].Extension != Extension) continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin()+j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
      Dest.push_back(Entry);
    }
  }
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return NULL;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

namespace {
  /// BasicAliasAnalysis - This is the primary alias analysis implementation.
  struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
    static char ID; // Class identification, replacement for typeinfo
    BasicAliasAnalysis() : ImmutablePass(ID) {
      initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
    }

    virtual void initializePass() {
      InitializeAliasAnalysis(this);
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfo>();
    }

    virtual AliasResult alias(const Location &LocA,
                              const Location &LocB) {
      assert(AliasCache.empty() && "AliasCache must be cleared after use!");
      assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
             "BasicAliasAnalysis doesn't support interprocedural queries.");
      AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
                                     LocB.Ptr, LocB.Size, LocB.TBAATag);
      // AliasCache rarely has more than 1 or 2 elements; always use
      // shrink_and_clear so it quickly returns to the inline capacity of the
      // SmallDenseMap if it ever grows larger.
      // FIXME: This should really be shrink_to_inline_capacity_and_clear().
      AliasCache.shrink_and_clear();
      return Alias;
    }

    virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
                                       const Location &Loc);

    virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
                                       ImmutableCallSite CS2) {
      // The AliasAnalysis base class has some smarts; let's use them.
      return AliasAnalysis::getModRefInfo(CS1, CS2);
    }

    /// pointsToConstantMemory - Chase pointers until we find a (constant
    /// global) or not.
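    /// Illustrative example (not from the original source):
    ///   @tbl = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
    ///   %p = getelementptr [4 x i32]* @tbl, i32 0, i32 %i
    /// Here %p points to constant memory, so no store can change what it
    /// loads.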
    virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);

    /// getModRefBehavior - Return the behavior when calling the given
    /// call site.
    virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);

    /// getModRefBehavior - Return the behavior when calling the given function.
    /// For use when the call site is not known.
    virtual ModRefBehavior getModRefBehavior(const Function *F);

    /// getAdjustedAnalysisPointer - This method is used when a pass implements
    /// an analysis interface through multiple inheritance. If needed, it
    /// should override this to adjust the this pointer as needed for the
    /// specified pass info.
    virtual void *getAdjustedAnalysisPointer(const void *ID) {
      if (ID == &AliasAnalysis::ID)
        return (AliasAnalysis*)this;
      return this;
    }

  private:
    // AliasCache - Track alias queries to guard against recursion.
    typedef std::pair<Location, Location> LocPair;
    typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
    AliasCacheTy AliasCache;

    // Visited - Track instructions visited by pointsToConstantMemory.
    SmallPtrSet<const Value*, 16> Visited;

    // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
    // instruction against another.
    AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                         const MDNode *V1TBAAInfo,
                         const Value *V2, uint64_t V2Size,
                         const MDNode *V2TBAAInfo,
                         const Value *UnderlyingV1, const Value *UnderlyingV2);

    // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
    // instruction against another.
    AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                         const MDNode *PNTBAAInfo,
                         const Value *V2, uint64_t V2Size,
                         const MDNode *V2TBAAInfo);

    /// aliasSelect - Disambiguate a Select instruction against another value.
    AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                            const MDNode *SITBAAInfo,
                            const Value *V2, uint64_t V2Size,
                            const MDNode *V2TBAAInfo);

    AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
                           const MDNode *V1TBAATag,
                           const Value *V2, uint64_t V2Size,
                           const MDNode *V2TBAATag);
  };
}  // End of anonymous namespace

// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                         "Basic Alias Analysis (stateless AA impl)",
                         false, true, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                       "Basic Alias Analysis (stateless AA impl)",
                       false, true, false)


ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
bool
BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
    if (!Visited.insert(V)) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
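    // (OrLocal means the caller is willing to treat function-local memory as
    // if it were constant, hence the alloca check below.)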
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return DoesNotAccessMemory;

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = OnlyReadsMemory;

  // The AliasAnalysis base class has some smarts; let's use them.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}

/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return DoesNotAccessMemory;

  // For intrinsics, we can check the table.
  if (unsigned iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = OnlyReadsMemory;

  // Otherwise be conservative.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}

/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
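///
/// Illustrative example (not from the original source): if %p is a
/// non-escaping alloca and every pointer argument of the call is NoAlias with
/// %p, the call is reported as NoModRef with respect to %p.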
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                  const Location &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(Location(*CI), Location(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }

  const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
  ModRefResult Min = ModRef;

  // Finally, handle specific knowledge of intrinsics.
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II != 0)
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memcpy:
    case Intrinsic::memmove: {
      uint64_t Len = UnknownSize;
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        Len = LenCI->getZExtValue();
      Value *Dest = II->getArgOperand(0);
      Value *Src = II->getArgOperand(1);
      // If it can't overlap the source or dest, then it doesn't modref the
      // loc.
      if (isNoAlias(Location(Dest, Len), Loc)) {
        if (isNoAlias(Location(Src, Len), Loc))
          return NoModRef;
        // If it can't overlap the dest, then worst case it reads the loc.
        Min = Ref;
      } else if (isNoAlias(Location(Src, Len), Loc)) {
        // If it can't overlap the source, then worst case it mutates the loc.
        Min = Mod;
      }
      break;
    }
    case Intrinsic::memset:
      // Since memset is 'accesses arguments' only, the AliasAnalysis base
      // class will handle it for the variable length case.
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
        uint64_t Len = LenCI->getZExtValue();
        Value *Dest = II->getArgOperand(0);
        if (isNoAlias(Location(Dest, Len), Loc))
          return NoModRef;
      }
      // We know that memset doesn't load anything.
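      // (So even with an unknown length, the result can be tightened from
      // ModRef to Mod.)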
      Min = Mod;
      break;
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start: {
      uint64_t PtrSize =
        cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      if (isNoAlias(Location(II->getArgOperand(1),
                             PtrSize,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::invariant_end: {
      uint64_t PtrSize =
        cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
      if (isNoAlias(Location(II->getArgOperand(2),
                             PtrSize,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::arm_neon_vld1: {
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
      uint64_t Size =
        TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
      if (isNoAlias(Location(II->getArgOperand(0), Size,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::arm_neon_vst1: {
      uint64_t Size =
        TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType())
           : UnknownSize;
      if (isNoAlias(Location(II->getArgOperand(0), Size,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  else if (TLI.has(LibFunc::memset_pattern16) &&
           CS.getCalledFunction() &&
           CS.getCalledFunction()->getName() == "memset_pattern16") {
    const Function *MS = CS.getCalledFunction();
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2))) {
      uint64_t Len = UnknownSize;
      if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
        Len = LenCI->getZExtValue();
      const Value *Dest = CS.getArgument(0);
      const Value *Src = CS.getArgument(1);
      // If it can't overlap the source or dest, then it doesn't modref the
      // loc.
      if (isNoAlias(Location(Dest, Len), Loc)) {
        // Always reads 16 bytes of the source.
        if (isNoAlias(Location(Src, 16), Loc))
          return NoModRef;
        // If it can't overlap the dest, then worst case it reads the loc.
        Min = Ref;
        // Always reads 16 bytes of the source.
      } else if (isNoAlias(Location(Src, 16), Loc)) {
        // If it can't overlap the source, then worst case it mutates the loc.
        Min = Mod;
      }
    }
  }

  // The AliasAnalysis base class has some smarts; let's use them.
  return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
}

static bool areVarIndicesEqual(SmallVector<VariableGEPIndex, 4> &Indices1,
                               SmallVector<VariableGEPIndex, 4> &Indices2) {
  unsigned Size1 = Indices1.size();
  unsigned Size2 = Indices2.size();

  if (Size1 != Size2)
    return false;

  for (unsigned I = 0; I != Size1; ++I)
    if (Indices1[I] != Indices2[I])
      return false;

  return true;
}

/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, TD), UnderlyingV2 is the same
/// for V2.
///
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                             const MDNode *V1TBAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const MDNode *V2TBAAInfo,
                             const Value *UnderlyingV1,
                             const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // If we have two gep instructions with must-alias'ing or not-alias'ing base
  // pointers, figure out if the indices of the GEPs tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if (V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
                                                V1TBAAInfo, UnderlyingV2,
                                                V2Size, V2TBAAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
          DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
        const Value *GEP1BasePtr =
          DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          assert(TD == 0 &&
                 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
          return MayAlias;
        }
        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            areVarIndicesEqual(GEP1VariableIndices, GEP2VariableIndices))
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // Do the base pointers alias?
    AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
                                       UnderlyingV2, UnknownSize, 0);

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    int64_t GEP2BaseOffset;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(TD == 0 &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction.
    // If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == UnknownSize && V2Size == UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, 0,
                               V2, V2Size, V2TBAAInfo);
    if (R != MustAlias)
      // If V2 may alias GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(TD == 0 &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      if (V1Size != UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  // Try to distinguish something like &A[i][1] against &A[42][0].
  // Grab the least significant bit set in any of the scales.
  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i)
      Modulo |= (uint64_t)GEP1VariableIndices[i].Scale;
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != UnknownSize && V2Size != UnknownSize &&
        ModOffset >= V2Size && V1Size <= Modulo - ModOffset)
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
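  // Illustrative example (not from the original source): &A[i] vs &A[j] with
  // unrelated dynamic i and j lands here: same base object, an offset
  // difference we can't resolve, and none of the modulo/offset tricks above
  // apply.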
  return PartialAlias;
}

static AliasAnalysis::AliasResult
MergeAliasResults(AliasAnalysis::AliasResult A, AliasAnalysis::AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasAnalysis::PartialAlias && B == AliasAnalysis::MustAlias) ||
      (B == AliasAnalysis::PartialAlias && A == AliasAnalysis::MustAlias))
    return AliasAnalysis::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasAnalysis::MayAlias;
}

/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                const MDNode *SITBAAInfo,
                                const Value *V2, uint64_t V2Size,
                                const MDNode *V2TBAAInfo) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
        aliasCheck(SI->getTrueValue(), SISize, SITBAAInfo,
                   SI2->getTrueValue(), V2Size, V2TBAAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
        aliasCheck(SI->getFalseValue(), SISize, SITBAAInfo,
                   SI2->getFalseValue(), V2Size, V2TBAAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias =
    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getTrueValue(), SISize, SITBAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getFalseValue(), SISize,
               SITBAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                             const MDNode *PNTBAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const MDNode *V2TBAAInfo) {
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(Location(PN, PNSize, PNTBAAInfo),
                   Location(V2, V2Size, V2TBAAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);

      AliasResult Alias =
        aliasCheck(PN->getIncomingValue(0), PNSize, PNTBAAInfo,
                   PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)),
                   V2Size, V2TBAAInfo);
      if (Alias == MayAlias)
        return MayAlias;

      // If the first source of the PHI nodes NoAlias and the other inputs are
      // the PHI node itself through some amount of recursion, this does not
      // add any new information, so just return NoAlias.
      // bb:
      //    ptr = ptr2 + 1
      // loop:
      //    ptr_phi = phi [bb, ptr], [loop, ptr_plus_one]
      //    ptr2_phi = phi [bb, ptr2], [loop, ptr2_plus_one]
      //    ...
      //    ptr_plus_one = gep ptr_phi, 1
      //    ptr2_plus_one = gep ptr2_phi, 1
      // We assume for the recursion that the phis (ptr_phi, ptr2_phi) do
      // not alias each other.
      bool ArePhisAssumedNoAlias = false;
      AliasResult OrigAliasResult = NoAlias;
      if (Alias == NoAlias) {
        // Pretend the phis do not alias.
        assert(AliasCache.count(Locs) &&
               "There must exist an entry for the phi node");
        OrigAliasResult = AliasCache[Locs];
        AliasCache[Locs] = NoAlias;
        ArePhisAssumedNoAlias = true;
      }

      for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
          aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
                     PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                     V2Size, V2TBAAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (ArePhisAssumedNoAlias && Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *PV1 = PN->getIncomingValue(i);
    if (isa<PHINode>(PV1))
      // If any of the sources is itself a PHI, return MayAlias conservatively
      // to avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes, in which case this is O(m x n) time where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1))
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias = aliasCheck(V2, V2Size, V2TBAAInfo,
                                 V1Srcs[0], PNSize, PNTBAAInfo);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2TBAAInfo,
                                       V, PNSize, PNTBAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common
// cases, such as array references.
//
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                               const MDNode *V1TBAAInfo,
                               const Value *V2, uint64_t V2Size,
                               const MDNode *V2TBAAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // Are we checking for alias of the same value?
  if (V1 == V2) return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias;  // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, TD);
  const Value *O2 = GetUnderlyingObject(V2, TD);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
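  // (Pointers in non-zero address spaces are deliberately excluded, since on
  // some targets address 0 in another address space can be a valid,
  // dereferenceable location.)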
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Arguments can't alias with local allocations or noalias calls
    // in the same function.
    if (((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
         (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1)))))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if (TD)
    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
      return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(Location(V1, V1Size, V1TBAAInfo),
               Location(V2, V2Size, V2TBAAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
    AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
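  // The swaps below canonicalize the query so that the more structured value
  // (GEP first, then PHI, then Select) ends up in V1 before each handler runs.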
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V1TBAAInfo,
                                  V2, V2Size, V2TBAAInfo, O1, O2);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
                                  V2, V2Size, V2TBAAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
                                     V2, V2Size, V2TBAAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (TD && O1 == O2)
    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
      return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
    AliasAnalysis::alias(Location(V1, V1Size, V1TBAAInfo),
                         Location(V2, V2Size, V2TBAAInfo));
  return AliasCache[Locs] = Result;
}