//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc.), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
static const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions need to use the same search depth;
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to true so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note that even if the argument is marked nocapture, we still need to
      // check for copies made inside the function. The nocapture attribute
      // only specifies that there are no copies made that outlive the
      // function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meaning of "object" is slightly different in the following
  // contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // first parameter, then before llvm::getObjectSize() is called to get the
  // size of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We opt for the second option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}
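
// For illustration of the RoundToAlign behavior above (hypothetical values):
//
//   %buf = alloca [5 x i8], align 8   ; rounds up to an 8-byte object here
//
// A 6-byte access is therefore *not* proven to read past the end of %buf,
// and the size-based no-alias deduction in aliasCheck stays conservative.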

/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
enum ExtensionKind {
  EK_NotExtended,
  EK_SignExt,
  EK_ZeroExt
};

struct VariableGEPIndex {
  const Value *V;
  ExtensionKind Extension;
  int64_t Scale;

  bool operator==(const VariableGEPIndex &Other) const {
    return V == Other.V && Extension == Other.Extension &&
           Scale == Other.Scale;
  }

  bool operator!=(const VariableGEPIndex &Other) const {
    return !operator==(Other);
  }
};
}

/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts and return V as a Value*, and return whether we looked
/// through any sign or zero extends. The incoming Value is known to have
/// IntegerType, and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                  ExtensionKind &Extension,
                                  const DataLayout &DL, unsigned Depth,
                                  AssumptionCache *AC, DominatorTree *DT) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the
    // variable.
    Offset += Const->getValue();
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default: break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          break;
        // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension, DL,
                                        Depth + 1, AC, DT);
    Scale = Scale.zext(OldWidth);

    // We have to sign-extend even if Extension == EK_ZeroExt as we can't
    // decompose a sign extension (i.e. zext(x - 1) != zext(x) - zext(-1)).
    Offset = Offset.sext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}
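
// For illustration only -- a hypothetical caller (V, DL, AC and DT are
// assumed to be in scope, with a 64-bit index width):
//
//   APInt Scale(64, 0), Offset(64, 0);
//   ExtensionKind Ext = EK_NotExtended;
//   Value *Base = GetLinearExpression(V, Scale, Offset, Ext, DL, 0, AC, DT);
//
// If V computes "(%x << 2) + 12", this returns %x with Scale == 4 and
// Offset == 12, i.e. V == 4*%x + 12.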

/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
/// the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth).
/// When DataLayout is not around, it just looks through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       bool &MaxLookupReached, const DataLayout &DL,
                       AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  MaxLookupReached = false;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject
      // does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
      return V;

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin()+1,
         E = GEPOp->op_end(); I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL.getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (DL.getPointerSizeInBits(AS) > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
                                  DL, 0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index &&
            VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin()+i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64 - DL.getPointerSizeInBits(AS)) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  MaxLookupReached = true;
  return V;
}
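
// For illustration only -- a hypothetical decomposition. Given
// "%p = getelementptr [10 x i32], [10 x i32]* %A, i64 0, i64 %i":
//
//   int64_t BaseOffs;
//   bool MaxReached;
//   SmallVector<VariableGEPIndex, 4> VarIndices;
//   const Value *Base = DecomposeGEPExpression(P, BaseOffs, VarIndices,
//                                              MaxReached, DL, AC, DT);
//
// Base is %A, BaseOffs is 0, and VarIndices holds a single entry for %i with
// Scale == 4, since each i32 element contributes 4 bytes per unit of %i.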

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

namespace {
/// BasicAliasAnalysis - This is the primary alias analysis implementation.
struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
  static char ID; // Class identification, replacement for typeinfo
  BasicAliasAnalysis() : ImmutablePass(ID) {
    initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  AliasResult alias(const MemoryLocation &LocA,
                    const MemoryLocation &LocB) override {
    assert(AliasCache.empty() && "AliasCache must be cleared after use!");
    assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
           "BasicAliasAnalysis doesn't support interprocedural queries.");
    AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags,
                                   LocB.Ptr, LocB.Size, LocB.AATags);
    // AliasCache rarely has more than 1 or 2 elements; always use
    // shrink_and_clear so it quickly returns to the inline capacity of the
    // SmallDenseMap if it ever grows larger.
    // FIXME: This should really be shrink_to_inline_capacity_and_clear().
    AliasCache.shrink_and_clear();
    VisitedPhiBBs.clear();
    return Alias;
  }

  ModRefInfo getModRefInfo(ImmutableCallSite CS,
                           const MemoryLocation &Loc) override;

  ModRefInfo getModRefInfo(ImmutableCallSite CS1,
                           ImmutableCallSite CS2) override;

  /// pointsToConstantMemory - Chase pointers until we find a constant global
  /// or give up.
  bool pointsToConstantMemory(const MemoryLocation &Loc,
                              bool OrLocal) override;

  /// Get the location associated with a pointer argument of a callsite.
  ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override;

  /// getModRefBehavior - Return the behavior when calling the given
  /// call site.
  FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;

  /// getModRefBehavior - Return the behavior when calling the given function.
  /// For use when the call site is not known.
  FunctionModRefBehavior getModRefBehavior(const Function *F) override;

  /// getAdjustedAnalysisPointer - This method is used when a pass implements
  /// an analysis interface through multiple inheritance. If needed, it
  /// should override this to adjust the this pointer as needed for the
  /// specified pass info.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &AliasAnalysis::ID)
      return (AliasAnalysis*)this;
    return this;
  }

private:
  // AliasCache - Track alias queries to guard against recursion.
  typedef std::pair<MemoryLocation, MemoryLocation> LocPair;
  typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
  AliasCacheTy AliasCache;

  /// \brief Track phi nodes we have visited. When interpreting "Value"
  /// pointer equality as value equality, we need to make sure that the
  /// "Value" is not part of a cycle. Otherwise, two uses could come from
  /// different "iterations" of a cycle and see different values for the same
  /// "Value" pointer.
  /// The following example shows the problem:
  ///   %p = phi(%alloca1, %addr2)
  ///   %l = load %ptr
  ///   %addr1 = gep %alloca2, 0, %l
  ///   %addr2 = gep %alloca2, 0, (%l + 1)
  ///   alias(%p, %addr1) -> MayAlias!
  ///   store %l, ...
  SmallPtrSet<const BasicBlock*, 8> VisitedPhiBBs;

  // Visited - Track instructions visited by pointsToConstantMemory.
  SmallPtrSet<const Value*, 16> Visited;

  /// \brief Check whether two Values can be considered equivalent.
  ///
  /// In addition to pointer equivalence of \p V1 and \p V2, this checks
  /// whether they can not be part of a cycle in the value graph by looking at
  /// all visited phi nodes and making sure that the phis cannot reach the
  /// value. We have to do this because we are looking through phi nodes (that
  /// is, we say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
  bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);

  /// \brief Dest and Src are the variable indices from two decomposed
  /// GetElementPtr instructions GEP1 and GEP2 which have common base
  /// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
  /// difference between the two pointers.
  void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                          const SmallVectorImpl<VariableGEPIndex> &Src);

  // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
  // instruction against another.
  AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                       const AAMDNodes &V1AAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo,
                       const Value *UnderlyingV1, const Value *UnderlyingV2);

  // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
  // instruction against another.
  AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                       const AAMDNodes &PNAAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo);

  /// aliasSelect - Disambiguate a Select instruction against another value.
  AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                          const AAMDNodes &SIAAInfo,
                          const Value *V2, uint64_t V2Size,
                          const AAMDNodes &V2AAInfo);

  AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
                         AAMDNodes V1AATag,
                         const Value *V2, uint64_t V2Size,
                         AAMDNodes V2AATag);
};
} // End of anonymous namespace
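
// For illustration only -- a hypothetical legacy-pass-manager client. The
// consumer pass named here is an assumption, not part of this file:
//
//   legacy::PassManager PM;
//   PM.add(createBasicAliasAnalysisPass());
//   PM.add(createSomeAAConsumerPass()); // hypothetical consumer of AA
//   PM.run(M);
//
// Consumers reach this implementation through the AliasAnalysis analysis
// group, e.g. via getAnalysis<AliasAnalysis>() in their run method.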

// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                         "Basic Alias Analysis (stateless AA impl)",
                         false, true, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                       "Basic Alias Analysis (stateless AA impl)",
                       false, true, false)

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
bool BasicAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
                                                bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

// FIXME: This code is duplicated with MemoryLocation and should be hoisted to
// some common utility location.
static bool isMemsetPattern16(const Function *MS,
                              const TargetLibraryInfo &TLI) {
  if (TLI.has(LibFunc::memset_pattern16) &&
      MS->getName() == "memset_pattern16") {
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2)))
      return true;
  }

  return false;
}
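
// For reference, the C prototype this signature check corresponds to (from
// the Darwin string library) is roughly:
//
//   void memset_pattern16(void *b, const void *pattern16, size_t len);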

/// getModRefBehavior - Return the behavior when calling the given call site.
FunctionModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  // The AliasAnalysis base class has some smarts; let's use them.
  return FunctionModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}

/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
FunctionModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  // For intrinsics, we can check the table.
  if (Intrinsic::ID iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/IR/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  if (isMemsetPattern16(F, TLI))
    Min = FMRB_OnlyAccessesArgumentPointees;

  // Otherwise be conservative.
  return FunctionModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}

ModRefInfo BasicAliasAnalysis::getArgModRefInfo(ImmutableCallSite CS,
                                                unsigned ArgIdx) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      return ArgIdx ? MRI_Ref : MRI_Mod;
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  if (CS.getCalledFunction() &&
      isMemsetPattern16(CS.getCalledFunction(), *TLI)) {
    assert((ArgIdx == 0 || ArgIdx == 1) &&
           "Invalid argument index for memset_pattern16");
    return ArgIdx ? MRI_Ref : MRI_Mod;
  }
  // FIXME: Handle memset_pattern4 and memset_pattern8 also.

  return AliasAnalysis::getArgModRefInfo(CS, ArgIdx);
}

static bool isAssumeIntrinsic(ImmutableCallSite CS) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II && II->getIntrinsicID() == Intrinsic::assume)
    return true;

  return false;
}

bool BasicAliasAnalysis::doInitialization(Module &M) {
  InitializeAliasAnalysis(this, &M.getDataLayout());
  return true;
}

/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
ModRefInfo BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                             const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return MRI_NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the
  // pointer as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(MemoryLocation(*CI), MemoryLocation(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return MRI_NoModRef;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS))
    return MRI_NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
                                             ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
    return MRI_NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS1, CS2);
}
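
// For illustration of the escape-based reasoning above (a C-level sketch;
// the function names are hypothetical):
//
//   void caller(int *x) {
//     int local;     // local allocation that is never captured
//     f(x);          // 'local' is not among the call's arguments
//   }
//
// The query getModRefInfo(call-to-f, &local) returns MRI_NoModRef: the
// object is a non-escaping local and was not passed to the call.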

/// \brief Provide ad-hoc rules to disambiguate accesses through two GEP
/// operators, both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices aren't constants, we can't say anything.
  // If they're identical, the other indices might also be dynamically
  // equal, so the GEPs can alias.
  if (!C1 || !C2 || C1 == C2)
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  StructType *LastIndexedStruct =
      dyn_cast<StructType>(GetElementPtrInst::getIndexedType(
          GEP1->getSourceElementType(), IntermediateIndices));

  if (!LastIndexedStruct)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.
  //
  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}
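
// For illustration only -- the disjoint-field case this routine proves
// (a C-level sketch; the names are hypothetical):
//
//   struct S { int a; int b; };
//   struct S Arr[8];
//   // GEP1 = &Arr[i].a, GEP2 = &Arr[j].b, both accessed as 4-byte loads.
//
// The last (struct) indices are the distinct constants 0 and 1, and every
// prior index walks an array, so the structs indexed by the two GEPs either
// coincide or are disjoint; either way the two fields cannot overlap, and
// the result is NoAlias even though i and j are unknown.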

/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
/// UnderlyingV2 is the same for V2.
///
AliasResult BasicAliasAnalysis::aliasGEP(
    const GEPOperator *GEP1, uint64_t V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  bool GEP1MaxLookupReached;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // We have to get two AssumptionCaches here because GEP1 and V2 may be from
  // different functions.
  // FIXME: This really doesn't make any sense. We get a dominator tree below
  // that can only refer to a single function. But this function (aliasGEP) is
  // a method on an immutable pass that can be called when there *isn't*
  // a single function. The old pass management layer makes this "work", but
  // this isn't really a clean solution.
  AssumptionCacheTracker &ACT = getAnalysis<AssumptionCacheTracker>();
  AssumptionCache *AC1 = nullptr, *AC2 = nullptr;
  if (auto *GEP1I = dyn_cast<Instruction>(GEP1))
    AC1 = &ACT.getAssumptionCache(
        const_cast<Function &>(*GEP1I->getParent()->getParent()));
  if (auto *I2 = dyn_cast<Instruction>(V2))
    AC2 = &ACT.getAssumptionCache(
        const_cast<Function &>(*I2->getParent()->getParent()));

  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;

  // If we have two gep instructions with must-aliasing or non-aliasing base
  // pointers, figure out if the indices of the GEPs tell us anything about
  // the derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
                                                V1AAInfo, UnderlyingV2,
                                                V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        bool GEP2MaxLookupReached;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
            DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                                   GEP2MaxLookupReached, *DL, AC2, DT);
        const Value *GEP1BasePtr =
            DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                                   GEP1MaxLookupReached, *DL, AC1, DT);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          assert(!DL &&
                 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
          return MayAlias;
        }
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            GEP1VariableIndices == GEP2VariableIndices)
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // If we get a No or May, then return it immediately; no amount of
    // analysis will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    int64_t GEP2BaseOffset;
    bool GEP2MaxLookupReached;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
        DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                               GEP2MaxLookupReached, *DL, AC2, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (DL && GEP1->getPointerOperand() == GEP2->getPointerOperand()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, *DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses have unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, V2Size, V2AAInfo);
    if (R != MustAlias)
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      //   +                +
      //   | BaseOffset     |
      //   ---------------->|
      //   |-->V1Size       |-------> V2Size
      //   GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with a negative index ('gep <ptr>, -1, ...').
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t) GEP1VariableIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = GEP1VariableIndices[i].V;

        bool SignKnownZero, SignKnownOne;
        ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne,
                       *DL, 0, AC1, nullptr, DT);

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = GEP1VariableIndices[i].Extension == EK_ZeroExt;
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = GEP1VariableIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0), then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 &&
        V2Size <= (uint64_t) GEP1BaseOffset)
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}
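
// For illustration of the Modulo reasoning above (hypothetical values):
// comparing &A[i][1] against &A[42][0] over "int A[][4]", GEP1 decomposes to
// A + 16*i + 4 and GEP2 to A + 672, so after the subtraction GEP1BaseOffset
// is congruent to 4 (mod 16), the only variable scale being 16. For two
// 4-byte accesses, ModOffset (4) >= V2Size (4) and V1Size (4) <= 16 - 4, so
// the accesses can never overlap and the result is NoAlias.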

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasResult BasicAliasAnalysis::aliasSelect(const SelectInst *SI,
                                            uint64_t SISize,
                                            const AAMDNodes &SIAAInfo,
                                            const Value *V2, uint64_t V2Size,
                                            const AAMDNodes &V2AAInfo) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then the result
  // is NoAlias / MustAlias. Otherwise, the result is MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasResult BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                                         const AAMDNodes &PNAAInfo,
                                         const Value *V2, uint64_t V2Size,
                                         const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias, there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias, or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  bool isRecursive = false;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any source itself is a PHI, return MayAlias conservatively to
      // avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes, in which case this is O(m x n) time, where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;

    if (EnableRecPhiAnalysis)
      if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
        // Check whether the incoming value is a GEP that advances the pointer
        // result of this PHI node (e.g. in a loop). If this is the case, we
        // would recurse and always get a MayAlias. Handle this case specially
        // below.
        if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
            isa<ConstantInt>(PV1GEP->idx_begin())) {
          isRecursive = true;
          continue;
        }
      }

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = MemoryLocation::UnknownSize;

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo,
                                 V1Srcs[0], PNSize, PNAAInfo);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then the result
  // is NoAlias / MustAlias. Otherwise, the result is MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo,
                                       V, PNSize, PNAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}
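
// For illustration of the recursive-PHI handling above (hypothetical IR):
//
//   loop:
//     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr i8, i8* %p, i64 1
//
// With -basicaa-recphi enabled, %p.next is recognized as %p advanced by a
// constant, so only %base is kept as a PHI source and PNSize is widened to
// UnknownSize to cover every address the advancing pointer can reach.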

// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common
// cases, such as array references.
//
AliasResult BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                                           AAMDNodes V1AAInfo, const Value *V2,
                                           uint64_t V2Size,
                                           AAMDNodes V2AAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth);
  const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-constant identified objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a memory location
    // if that memory location doesn't escape. Or it may pass a nocapture
    // value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if (DL)
    if ((V1Size != MemoryLocation::UnknownSize &&
         isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
        (V2Size != MemoryLocation::UnknownSize &&
         isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
      return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V1AAInfo,
                                     V2, V2Size, V2AAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of the
  // accesses covers the entire object, then the accesses must overlap in
  // some way.
  if (DL && O1 == O2)
    if ((V1Size != MemoryLocation::UnknownSize &&
         isObjectSize(O1, V1Size, *DL, *TLI)) ||
        (V2Size != MemoryLocation::UnknownSize &&
         isObjectSize(O2, V2Size, *DL, *TLI)))
      return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
      AliasAnalysis::alias(MemoryLocation(V1, V1Size, V1AAInfo),
                           MemoryLocation(V2, V2Size, V2AAInfo));
  return AliasCache[Locs] = Result;
}

bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V,
                                                       const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Use dominance or loop info if available.
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
      return false;

  return true;
}

/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
void BasicAliasAnalysis::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].Extension != Extension)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
      Dest.push_back(Entry);
    }
  }
}
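
// For illustration of GetIndexDifference (hypothetical values): with
// Dest = {(%x, 20)} and Src = {(%x, 16), (%y, 4)} (matching extensions),
// the %x entries partially cancel to (%x, 4) and the unmatched %y entry is
// negated, leaving Dest = {(%x, 4), (%y, -4)} -- the symbolic GEP1 - GEP2.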