//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc.), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include <algorithm>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to true so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is a byval or noalias argument, then it has not escaped before
  // entering the function. Check if it escapes inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Don't bother analyzing arguments already known not to escape.
      if (A->hasNoCaptureAttr())
        return true;
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
    }
  return false;
}

/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}
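
// Illustrative note (not from the original source) on how the two predicates
// above combine in aliasCheck: given
//   %a = alloca i32         ; a local object that never escapes
//   %p = call i32* @f()     ; an escape source: a call return value
// the analysis can conclude that %a and %p do not alias, because @f could
// only have returned %a if %a had escaped first.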

/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const TargetData &TD,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign))
    return Size;
  return AliasAnalysis::UnknownSize;
}

/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const TargetData &TD,
                                const TargetLibraryInfo &TLI) {
  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);

  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}

/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
                         const TargetData &TD, const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, TD, TLI);
  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
  enum ExtensionKind {
    EK_NotExtended,
    EK_SignExt,
    EK_ZeroExt
  };

  struct VariableGEPIndex {
    const Value *V;
    ExtensionKind Extension;
    int64_t Scale;
  };
}

/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts and return V as a Value*, and return whether we looked
/// through any sign or zero extends. The incoming Value is known to have
/// IntegerType and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
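///
/// For example (an illustrative note, not from the original source): analyzing
/// "add (shl i32 %x, 2), 8" returns %x with Scale = 4 and Offset = 8, i.e. the
/// linear form 4*%x + 8.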
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                  ExtensionKind &Extension,
                                  const TargetData &TD, unsigned Depth) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default: break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &TD))
          break;
        // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
                                        TD, Depth+1);
    Scale = Scale.zext(OldWidth);
    Offset = Offset.zext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
/// the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When TargetData is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. When not, it just looks
/// through pointer casts.
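///
/// For example (an illustrative note, not from the original source): with
/// TargetData, "getelementptr [4 x i32]* %A, i64 0, i64 %i" decomposes into
/// base pointer %A, BaseOffs = 0, and the single variable index
/// {V = %i, Extension = EK_NotExtended, Scale = 4}.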
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       const TargetData *TD) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = 6;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (Op == 0) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (GEPOp == 0) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and use it here.
        if (const Value *Simplified =
              SimplifyInstruction(const_cast<Instruction *>(I), TD)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
          ->getElementType()->isSized())
      return V;

    // If we are lacking TargetData information, we can't compute the offsets
    // of elements computed by GEPs. However, we can handle bitcast equivalent
    // GEPs.
    if (TD == 0) {
      if (!GEPOp->hasAllZeroIndices())
        return V;
      V = GEPOp->getOperand(0);
      continue;
    }

    // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin()+1,
         E = GEPOp->op_end(); I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = TD->getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
      if (TD->getPointerSizeInBits() > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
                                  *TD, 0);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index &&
            VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin()+i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  return V;
}

/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
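///
/// For example (an illustrative note, not from the original source):
/// Dest = {(%x, 4)} and Src = {(%x, 1)} leaves Dest = {(%x, 3)}, while
/// Dest = {(%x, 4)} and Src = {(%y, 4)} leaves Dest = {(%x, 4), (%y, -4)}.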
static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                               const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty()) return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (Dest[j].V != V || Dest[j].Extension != Extension) continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin()+j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
      Dest.push_back(Entry);
    }
  }
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return NULL;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

namespace {
  /// BasicAliasAnalysis - This is the primary alias analysis implementation.
  struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
    static char ID; // Class identification, replacement for typeinfo
    BasicAliasAnalysis() : ImmutablePass(ID),
                           // AliasCache rarely has more than 1 or 2 elements,
                           // so start it off fairly small so that clear()
                           // doesn't have to tromp through 64 (the default)
                           // elements on each alias query. This really wants
                           // something like a SmallDenseMap.
                           AliasCache(8) {
      initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
    }

    virtual void initializePass() {
      InitializeAliasAnalysis(this);
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfo>();
    }

    virtual AliasResult alias(const Location &LocA,
                              const Location &LocB) {
      assert(AliasCache.empty() && "AliasCache must be cleared after use!");
      assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
             "BasicAliasAnalysis doesn't support interprocedural queries.");
      AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
                                     LocB.Ptr, LocB.Size, LocB.TBAATag);
      AliasCache.clear();
      return Alias;
    }

    virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
                                       const Location &Loc);

    virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
                                       ImmutableCallSite CS2) {
      // The AliasAnalysis base class has some smarts; let's use them.
      return AliasAnalysis::getModRefInfo(CS1, CS2);
    }

    /// pointsToConstantMemory - Chase pointers until we find a constant
    /// global, or conclude that we can't tell.
    virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);

    /// getModRefBehavior - Return the behavior when calling the given
    /// call site.
    virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);

    /// getModRefBehavior - Return the behavior when calling the given function.
    /// For use when the call site is not known.
    virtual ModRefBehavior getModRefBehavior(const Function *F);

    /// getAdjustedAnalysisPointer - This method is used when a pass implements
    /// an analysis interface through multiple inheritance. If needed, it
    /// should override this to adjust the this pointer as needed for the
    /// specified pass info.
    virtual void *getAdjustedAnalysisPointer(const void *ID) {
      if (ID == &AliasAnalysis::ID)
        return (AliasAnalysis*)this;
      return this;
    }

  private:
    // AliasCache - Track alias queries to guard against recursion.
    typedef std::pair<Location, Location> LocPair;
    typedef DenseMap<LocPair, AliasResult> AliasCacheTy;
    AliasCacheTy AliasCache;

    // Visited - Track instructions visited by pointsToConstantMemory.
    SmallPtrSet<const Value*, 16> Visited;

    // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
    // instruction against another.
    AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                         const Value *V2, uint64_t V2Size,
                         const MDNode *V2TBAAInfo,
                         const Value *UnderlyingV1, const Value *UnderlyingV2);

    // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
    // instruction against another.
    AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                         const MDNode *PNTBAAInfo,
                         const Value *V2, uint64_t V2Size,
                         const MDNode *V2TBAAInfo);

    /// aliasSelect - Disambiguate a Select instruction against another value.
    AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                            const MDNode *SITBAAInfo,
                            const Value *V2, uint64_t V2Size,
                            const MDNode *V2TBAAInfo);

    AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
                           const MDNode *V1TBAATag,
                           const Value *V2, uint64_t V2Size,
                           const MDNode *V2TBAATag);
  };
}  // End of anonymous namespace

// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                         "Basic Alias Analysis (stateless AA impl)",
                         false, true, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                       "Basic Alias Analysis (stateless AA impl)",
                       false, true, false)

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
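///
/// For example (an illustrative note, not from the original source): a pointer
/// into "@g = constant [4 x i32] ..." points to constant memory; with
/// OrLocal = true, a pointer to an alloca in this function counts as well.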
bool
BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
    if (!Visited.insert(V)) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return DoesNotAccessMemory;

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = OnlyReadsMemory;

  // The AliasAnalysis base class has some smarts; let's use them.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}

/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return DoesNotAccessMemory;

  // For intrinsics, we can check the table.
  if (unsigned iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = OnlyReadsMemory;

  // Otherwise be conservative.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}

/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
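///
/// For example (an illustrative note, not from the original source): a call
/// that is passed no pointer that may alias a non-escaping local alloca gets
/// NoModRef for a location based on that alloca.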
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                  const Location &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(Location(*CI), Location(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }

  const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
  ModRefResult Min = ModRef;

  // Finally, handle specific knowledge of intrinsics.
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II != 0)
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memcpy:
    case Intrinsic::memmove: {
      uint64_t Len = UnknownSize;
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        Len = LenCI->getZExtValue();
      Value *Dest = II->getArgOperand(0);
      Value *Src = II->getArgOperand(1);
      // If the location can't overlap either the source or the dest, the
      // call doesn't modref it.
      if (isNoAlias(Location(Dest, Len), Loc)) {
        if (isNoAlias(Location(Src, Len), Loc))
          return NoModRef;
        // If it can't overlap the dest, then worst case it reads the loc.
        Min = Ref;
      } else if (isNoAlias(Location(Src, Len), Loc)) {
        // If it can't overlap the source, then worst case it mutates the loc.
        Min = Mod;
      }
      break;
    }
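    // Illustrative note (not from the original source): for
    //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, ...)
    // a location proven NoAlias with %d but not with %s gets Ref (the call
    // may only read it), and the reverse gets Mod (the call may only write
    // it).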
    case Intrinsic::memset:
      // Since memset is 'accesses arguments' only, the AliasAnalysis base class
      // will handle it for the variable length case.
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
        uint64_t Len = LenCI->getZExtValue();
        Value *Dest = II->getArgOperand(0);
        if (isNoAlias(Location(Dest, Len), Loc))
          return NoModRef;
      }
      // We know that memset doesn't load anything.
      Min = Mod;
      break;
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start: {
      uint64_t PtrSize =
        cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      if (isNoAlias(Location(II->getArgOperand(1),
                             PtrSize,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::invariant_end: {
      uint64_t PtrSize =
        cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
      if (isNoAlias(Location(II->getArgOperand(2),
                             PtrSize,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::arm_neon_vld1: {
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
      uint64_t Size =
        TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
      if (isNoAlias(Location(II->getArgOperand(0), Size,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::arm_neon_vst1: {
      uint64_t Size =
        TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
      if (isNoAlias(Location(II->getArgOperand(0), Size,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  else if (TLI.has(LibFunc::memset_pattern16) &&
           CS.getCalledFunction() &&
           CS.getCalledFunction()->getName() == "memset_pattern16") {
    const Function *MS = CS.getCalledFunction();
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2))) {
      uint64_t Len = UnknownSize;
      if (const ConstantInt *LenCI = dyn_cast<ConstantInt>(CS.getArgument(2)))
        Len = LenCI->getZExtValue();
      const Value *Dest = CS.getArgument(0);
      const Value *Src = CS.getArgument(1);
      // If the location can't overlap either the source or the dest, the
      // call doesn't modref it.
      if (isNoAlias(Location(Dest, Len), Loc)) {
        // Always reads 16 bytes of the source.
        if (isNoAlias(Location(Src, 16), Loc))
          return NoModRef;
        // If it can't overlap the dest, then worst case it reads the loc.
        Min = Ref;
        // Always reads 16 bytes of the source.
      } else if (isNoAlias(Location(Src, 16), Loc)) {
        // If it can't overlap the source, then worst case it mutates the loc.
        Min = Mod;
      }
    }
  }

  // The AliasAnalysis base class has some smarts; let's use them.
  return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
}

/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
/// UnderlyingV2 is the same for V2.
///
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                             const Value *V2, uint64_t V2Size,
                             const MDNode *V2TBAAInfo,
                             const Value *UnderlyingV1,
                             const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // If we have two gep instructions with must-alias'ing base pointers, figure
  // out if the indexes to the GEP tell us anything about the derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
                                       UnderlyingV2, UnknownSize, 0);

    // If we get a No or May, then return it immediately: no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    int64_t GEP2BaseOffset;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);

    // If DecomposeGEPExpression isn't able to look all the way through the
    // addressing operation, we must not have TD and this is too complex for us
    // to handle without it.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(TD == 0 &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == UnknownSize && V2Size == UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, 0,
                               V2, V2Size, V2TBAAInfo);
    if (R != MustAlias)
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    // If DecomposeGEPExpression isn't able to look all the way through the
    // addressing operation, we must not have TD and this is too complex for us
    // to handle without it.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(TD == 0 &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resulting pointers must-alias. This happens when
  // we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must-alias as well.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;
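
  // Illustrative note for the constant-offset case below (not from the
  // original source): with GEP1BaseOffset = 2 and V2Size = 4, the accesses
  // [2, 6) and [0, 4) overlap, giving PartialAlias; with GEP1BaseOffset = 8
  // they cannot overlap, giving NoAlias.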

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      if (V1Size != UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  // Try to distinguish something like &A[i][1] against &A[42][0].
  // Grab the least significant bit set in any of the scales.
  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i)
      Modulo |= (uint64_t)GEP1VariableIndices[i].Scale;
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != UnknownSize && V2Size != UnknownSize &&
        ModOffset >= V2Size && V1Size <= Modulo - ModOffset)
      return NoAlias;
  }
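
  // Illustrative note on the check above (not from the original source): for
  // scales {20, 6}, Modulo = 2 (the lowest bit set in 20|6 = 22), so the two
  // addresses differ by GEP1BaseOffset modulo 2 plus some multiple of 2. If
  // ModOffset = 1 and both accesses are a single byte, the ranges can never
  // overlap, so the result is NoAlias.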

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}

static AliasAnalysis::AliasResult
MergeAliasResults(AliasAnalysis::AliasResult A, AliasAnalysis::AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasAnalysis::PartialAlias && B == AliasAnalysis::MustAlias) ||
      (B == AliasAnalysis::PartialAlias && A == AliasAnalysis::MustAlias))
    return AliasAnalysis::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasAnalysis::MayAlias;
}

/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                const MDNode *SITBAAInfo,
                                const Value *V2, uint64_t V2Size,
                                const MDNode *V2TBAAInfo) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
        aliasCheck(SI->getTrueValue(), SISize, SITBAAInfo,
                   SI2->getTrueValue(), V2Size, V2TBAAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
        aliasCheck(SI->getFalseValue(), SISize, SITBAAInfo,
                   SI2->getFalseValue(), V2Size, V2TBAAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select alias V2 with NoAlias or MustAlias, return
  // that result. Otherwise, return MayAlias.
  AliasResult Alias =
    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getTrueValue(), SISize, SITBAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getFalseValue(), SISize, SITBAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                             const MDNode *PNTBAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const MDNode *V2TBAAInfo) {
  // If the values are PHIs in the same block, we can do a more precise as well
  // as more efficient check: just check for aliases between the values on
  // corresponding edges.
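  //
  // Illustrative note (not from the original source): for
  //   %p = phi i32* [ %a, %bb1 ], [ %b, %bb2 ]
  //   %q = phi i32* [ %x, %bb1 ], [ %y, %bb2 ]
  // only the pairs (%a, %x) and (%b, %y) need to be compared; %a never needs
  // to be checked against %y, because both PHIs pick their value from the
  // same edge.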
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      AliasResult Alias =
        aliasCheck(PN->getIncomingValue(0), PNSize, PNTBAAInfo,
                   PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)),
                   V2Size, V2TBAAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
          aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
                     PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                     V2Size, V2TBAAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }
      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *PV1 = PN->getIncomingValue(i);
    if (isa<PHINode>(PV1))
      // If any source itself is a PHI, return MayAlias conservatively to avoid
      // compile time explosion. The worst possible case is if both sides are
      // PHI nodes, in which case this is O(m x n) time, where 'm' and 'n' are
      // the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1))
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias = aliasCheck(V2, V2Size, V2TBAAInfo,
                                 V1Srcs[0], PNSize, PNTBAAInfo);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node alias V2 with NoAlias or MustAlias, return
  // that result. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2TBAAInfo,
                                       V, PNSize, PNTBAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common cases,
// such as array references.
//
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                               const MDNode *V1TBAAInfo,
                               const Value *V2, uint64_t V2Size,
                               const MDNode *V2TBAAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // Are we checking for alias of the same value?
  if (V1 == V2) return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias;  // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, TD);
  const Value *O2 = GetUnderlyingObject(V2, TD);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias non-constant identified objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Arguments can't alias local allocations or noalias calls in the same
    // function.
    if (((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
         (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1)))))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a memory location
    // if that memory location doesn't escape. Or it may pass a nocapture
    // value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if (TD)
    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
      return NoAlias;
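
  // Illustrative note (not from the original source): a 16-byte access cannot
  // be based on an 8-byte alloca without running past the end of the object,
  // which would be undefined behavior, so the two accesses get NoAlias.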

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(Location(V1, V1Size, V1TBAAInfo),
               Location(V2, V2Size, V2TBAAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
    AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
  // GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, V2TBAAInfo, O1, O2);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
                                  V2, V2Size, V2TBAAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
                                     V2, V2Size, V2TBAAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // is accessing the entire object, then the accesses must overlap in
  // some way.
  if (TD && O1 == O2)
    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
      return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
    AliasAnalysis::alias(Location(V1, V1Size, V1TBAAInfo),
                         Location(V2, V2Size, V2TBAAInfo));
  return AliasCache[Locs] = Result;
}