//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
using namespace llvm;

/// Cutoff after which to stop analyzing a set of phi nodes potentially
/// involved in a cycle. Because we are analyzing 'through' phi nodes, we need
/// to be careful with value equivalence. We use reachability to make sure a
/// value cannot be involved in a cycle.
static const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions must use the same search depth;
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
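  // For example, a byval argument is a fresh copy made for this call, so a
  // pointer into it can only escape through something this function itself
  // does, such as storing the pointer or passing it to a capturing call.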
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note that even if the argument is marked nocapture, we still need to
      // check for copies made inside the function. The nocapture attribute
      // only specifies that there are no copies made that outlive the
      // function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meaning of "object" is slightly different in the following
  // contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // 1st parameter, then before llvm::getObjectSize() is called to get the
  // size of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign=*/true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}
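
// A sketch of how callers use isObjectSmallerThan: given "%a = alloca i32"
// (a 4-byte identified object), an 8-byte access based on %a would be
// undefined behavior, so aliasCheck concludes NoAlias whenever one access is
// provably larger than the entire object on the other side.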

/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
                         const DataLayout &DL, const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
enum ExtensionKind {
  EK_NotExtended,
  EK_SignExt,
  EK_ZeroExt
};

struct VariableGEPIndex {
  const Value *V;
  ExtensionKind Extension;
  int64_t Scale;

  bool operator==(const VariableGEPIndex &Other) const {
    return V == Other.V && Extension == Other.Extension && Scale == Other.Scale;
  }

  bool operator!=(const VariableGEPIndex &Other) const {
    return !operator==(Other);
  }
};
}

/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts, return V as a Value*, and record through \p Extension
/// whether we looked through any sign or zero extends. The incoming Value is
/// known to have IntegerType, and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                  ExtensionKind &Extension,
                                  const DataLayout &DL, unsigned Depth,
                                  AssumptionCache *AC, DominatorTree *DT) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default: break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          break;
        // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension, DL,
                                        Depth + 1, AC, DT);
    Scale = Scale.zext(OldWidth);
    Offset = Offset.zext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
/// the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth).
/// When DataLayout is not around, it just looks through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       bool &MaxLookupReached, const DataLayout &DL,
                       AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  MaxLookupReached = false;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject
      // does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
      return V;

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
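        // For example, for "getelementptr {i32, double}, {i32, double}* %p,
        // i64 0, i32 1" on a typical 64-bit target, DataLayout places field 1
        // at offset 8 (4 bytes of i32 plus 4 bytes of padding), so BaseOffs
        // grows by 8 rather than by a scaled element size.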
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL.getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (DL.getPointerSizeInBits(AS) > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
                                  DL, 0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index && VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size (sign extend Scale from the pointer width to 64 bits).
      if (unsigned ShiftBits = 64 - DL.getPointerSizeInBits(AS)) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  MaxLookupReached = true;
  return V;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

namespace {
/// BasicAliasAnalysis - This is the primary alias analysis implementation.
struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
  static char ID; // Class identification, replacement for typeinfo
  BasicAliasAnalysis() : ImmutablePass(ID) {
    initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  AliasResult alias(const MemoryLocation &LocA,
                    const MemoryLocation &LocB) override {
    assert(AliasCache.empty() && "AliasCache must be cleared after use!");
    assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
           "BasicAliasAnalysis doesn't support interprocedural queries.");
    AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags,
                                   LocB.Ptr, LocB.Size, LocB.AATags);
    // AliasCache rarely has more than 1 or 2 elements; always use
    // shrink_and_clear so it quickly returns to the inline capacity of the
    // SmallDenseMap if it ever grows larger.
    // FIXME: This should really be shrink_to_inline_capacity_and_clear().
    AliasCache.shrink_and_clear();
    VisitedPhiBBs.clear();
    return Alias;
  }

  ModRefResult getModRefInfo(ImmutableCallSite CS,
                             const MemoryLocation &Loc) override;

  ModRefResult getModRefInfo(ImmutableCallSite CS1,
                             ImmutableCallSite CS2) override;

  /// pointsToConstantMemory - Chase pointers until we find a constant global
  /// or give up.
  bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) override;

  /// Get the location associated with a pointer argument of a callsite.
  ModRefResult getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override;

  /// getModRefBehavior - Return the behavior when calling the given
  /// call site.
  ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;

  /// getModRefBehavior - Return the behavior when calling the given function.
  /// For use when the call site is not known.
  ModRefBehavior getModRefBehavior(const Function *F) override;

  /// getAdjustedAnalysisPointer - This method is used when a pass implements
  /// an analysis interface through multiple inheritance. If needed, it
  /// should override this to adjust the this pointer as needed for the
  /// specified pass info.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &AliasAnalysis::ID)
      return (AliasAnalysis *)this;
    return this;
  }

private:
  // AliasCache - Track alias queries to guard against recursion.
  typedef std::pair<MemoryLocation, MemoryLocation> LocPair;
  typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
  AliasCacheTy AliasCache;

  /// \brief Track phi nodes we have visited. When interpreting "Value" pointer
  /// equality as value equality we need to make sure that the "Value" is not
  /// part of a cycle. Otherwise, two uses could come from different
  /// "iterations" of a cycle and see different values for the same "Value"
  /// pointer.
  /// The following example shows the problem:
  ///   %p = phi(%alloca1, %addr2)
  ///   %l = load %ptr
  ///   %addr1 = gep %alloca2, 0, %l
  ///   %addr2 = gep %alloca2, 0, (%l + 1)
  ///   alias(%p, %addr1) -> MayAlias!
  ///   store %l, ...
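  /// When the phi is looked through, %addr2 (built from the previous
  /// iteration's %l) gets compared against %addr1 (built from the current
  /// %l); treating the two occurrences of %l as the same value would wrongly
  /// prove the addresses distinct, even though "previous %l + 1" can equal
  /// the current %l.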
  SmallPtrSet<const BasicBlock *, 8> VisitedPhiBBs;

  // Visited - Track instructions visited by pointsToConstantMemory.
  SmallPtrSet<const Value *, 16> Visited;

  /// \brief Check whether two Values can be considered equivalent.
  ///
  /// In addition to pointer equivalence of \p V1 and \p V2 this checks
  /// whether they can not be part of a cycle in the value graph by looking at
  /// all visited phi nodes and making sure that the phis cannot reach the
  /// value. We have to do this because we are looking through phi nodes (that
  /// is, we say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
  bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);

  /// \brief Dest and Src are the variable indices from two decomposed
  /// GetElementPtr instructions GEP1 and GEP2 which have common base
  /// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
  /// difference between the two pointers.
  void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                          const SmallVectorImpl<VariableGEPIndex> &Src);

  // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
  // instruction against another.
  AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                       const AAMDNodes &V1AAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo,
                       const Value *UnderlyingV1, const Value *UnderlyingV2);

  // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
  // instruction against another.
  AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                       const AAMDNodes &PNAAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo);

  /// aliasSelect - Disambiguate a Select instruction against another value.
  AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                          const AAMDNodes &SIAAInfo,
                          const Value *V2, uint64_t V2Size,
                          const AAMDNodes &V2AAInfo);

  AliasResult aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AATag,
                         const Value *V2, uint64_t V2Size, AAMDNodes V2AATag);
};
} // End of anonymous namespace

// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                         "Basic Alias Analysis (stateless AA impl)",
                         false, true, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                       "Basic Alias Analysis (stateless AA impl)",
                       false, true, false)

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
bool BasicAliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
                                                bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

// FIXME: This code is duplicated with MemoryLocation and should be hoisted to
// some common utility location.
static bool isMemsetPattern16(const Function *MS,
                              const TargetLibraryInfo &TLI) {
  if (TLI.has(LibFunc::memset_pattern16) &&
      MS->getName() == "memset_pattern16") {
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2)))
      return true;
  }

  return false;
}

/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return DoesNotAccessMemory;

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = OnlyReadsMemory;

  if (CS.onlyAccessesArgMemory())
    Min = ModRefBehavior(Min & OnlyAccessesArgumentPointees);

  // The AliasAnalysis base class has some smarts; let's use them.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}

/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return DoesNotAccessMemory;

  // For intrinsics, we can check the table.
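  // (The #include below pulls in a tablegen-generated fragment that returns
  // the recorded ModRefBehavior for the given intrinsic ID.)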
  if (Intrinsic::ID iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/IR/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = OnlyReadsMemory;

  if (F->onlyAccessesArgMemory())
    Min = ModRefBehavior(Min & OnlyAccessesArgumentPointees);

  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  if (isMemsetPattern16(F, TLI))
    Min = OnlyAccessesArgumentPointees;

  // Otherwise be conservative.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}

AliasAnalysis::ModRefResult
BasicAliasAnalysis::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      return ArgIdx ? Ref : Mod;
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  if (CS.getCalledFunction() &&
      isMemsetPattern16(CS.getCalledFunction(), *TLI)) {
    assert((ArgIdx == 0 || ArgIdx == 1) &&
           "Invalid argument index for memset_pattern16");
    return ArgIdx ? Ref : Mod;
  }
  // FIXME: Handle memset_pattern4 and memset_pattern8 also.

  return AliasAnalysis::getArgModRefInfo(CS, ArgIdx);
}

static bool isAssumeIntrinsic(ImmutableCallSite CS) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II && II->getIntrinsicID() == Intrinsic::assume)
    return true;

  return false;
}

bool BasicAliasAnalysis::doInitialization(Module &M) {
  InitializeAliasAnalysis(this, &M.getDataLayout());
  return true;
}

/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                  const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
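  // For example, a non-escaping alloca passed to "call void @f(i8* nocapture
  // %q)" can still be read or written through %q during the call, so the
  // argument scan below must rule out such uses before we can return NoModRef.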
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(MemoryLocation(*CI), MemoryLocation(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS))
    return NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS, Loc);
}

AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
                                  ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
    return NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS1, CS2);
}

/// \brief Provide ad-hoc rules to disambiguate accesses through two GEP
/// operators, both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices aren't constants, we can't say anything.
  // If they're identical, the other indices might also be dynamically equal,
  // so the GEPs can alias.
  if (!C1 || !C2 || C1 == C2)
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other than an
  // array, different indices can lead to different final types.
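  // For example, within [10 x { i32, i32 }], two GEPs that differ only in the
  // array index still reach the same struct type, so different constant field
  // indices select provably disjoint fields; any non-array type along the way
  // would break that reasoning.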
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  StructType *LastIndexedStruct =
      dyn_cast<StructType>(GetElementPtrInst::getIndexedType(
          GEP1->getSourceElementType(), IntermediateIndices));

  if (!LastIndexedStruct)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
/// UnderlyingV2 is the same for V2.
///
AliasResult BasicAliasAnalysis::aliasGEP(
    const GEPOperator *GEP1, uint64_t V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, uint64_t V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  bool GEP1MaxLookupReached;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // We have to get two AssumptionCaches here because GEP1 and V2 may be from
  // different functions.
  // FIXME: This really doesn't make any sense. We get a dominator tree below
  // that can only refer to a single function. But this function (aliasGEP) is
  // a method on an immutable pass that can be called when there *isn't*
  // a single function.
  // The old pass management layer makes this "work", but this isn't really a
  // clean solution.
  AssumptionCacheTracker &ACT = getAnalysis<AssumptionCacheTracker>();
  AssumptionCache *AC1 = nullptr, *AC2 = nullptr;
  if (auto *GEP1I = dyn_cast<Instruction>(GEP1))
    AC1 = &ACT.getAssumptionCache(
        const_cast<Function &>(*GEP1I->getParent()->getParent()));
  if (auto *I2 = dyn_cast<Instruction>(V2))
    AC2 = &ACT.getAssumptionCache(
        const_cast<Function &>(*I2->getParent()->getParent()));

  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;

  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indices to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        bool GEP2MaxLookupReached;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
            DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                                   GEP2MaxLookupReached, *DL, AC2, DT);
        const Value *GEP1BasePtr =
            DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                                   GEP1MaxLookupReached, *DL, AC1, DT);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          assert(!DL &&
                 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
          return MayAlias;
        }
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            GEP1VariableIndices == GEP2VariableIndices)
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
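    // Each GEP decomposes to (Base + ConstantOffset + sum(Scale_i * V_i));
    // since the bases must-alias, comparing the constant offsets and the
    // variable-index lists below is enough to relate the two pointers.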
    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    int64_t GEP2BaseOffset;
    bool GEP2MaxLookupReached;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
        DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                               GEP2MaxLookupReached, *DL, AC2, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (DL && GEP1->getPointerOperand() == GEP2->getPointerOperand()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, *DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, V2Size, V2AAInfo);
    if (R != MustAlias)
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
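  // For example, "%g = getelementptr [4 x i32], [4 x i32]* %p, i64 0, i64 0"
  // decomposes to a zero offset and no variable indices, so %g gets MustAlias
  // against any V2 that must-aliases the base %p.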
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with a negative index ('gep <ptr>, -1, ...').
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  // Try to distinguish something like &A[i][1] against &A[42][0].
  // Grab the least significant bit set in any of the scales.
  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i)
      Modulo |= (uint64_t)GEP1VariableIndices[i].Scale;
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasResult BasicAliasAnalysis::aliasSelect(const SelectInst *SI,
                                            uint64_t SISize,
                                            const AAMDNodes &SIAAInfo,
                                            const Value *V2, uint64_t V2Size,
                                            const AAMDNodes &V2AAInfo) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
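  // For example, given
  //   %s1 = select i1 %c, i32* %p, i32* %q
  //   %s2 = select i1 %c, i32* %x, i32* %y
  // the same arm is taken in both selects at run time, so comparing %p with
  // %x and %q with %y is sufficient.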
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasResult BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                                         const AAMDNodes &PNAAInfo,
                                         const Value *V2, uint64_t V2Size,
                                         const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise as well
  // as more efficient check: just check for aliases between the values on
  // corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyze the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value *, 4> UniqueSrc;
  SmallVector<Value *, 4> V1Srcs;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any of the sources is itself a PHI, return MayAlias conservatively
      // to avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes, in which case this is O(m x n) time where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize, PNAAInfo);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common
// cases, such as array references.
//
AliasResult BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                                           AAMDNodes V1AAInfo, const Value *V2,
                                           uint64_t V2Size,
                                           AAMDNodes V2AAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other.

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth);
  const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
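    // For example, an Argument cannot alias an alloca from the same function:
    // the caller could not have passed in a pointer to memory that this
    // invocation had not yet allocated.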
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if (DL)
    if ((V1Size != MemoryLocation::UnknownSize &&
         isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
        (V2Size != MemoryLocation::UnknownSize &&
         isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
      return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI), for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of the
  // accesses covers the entire object, then the accesses must
  // overlap in some way.
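  // For example, if both pointers point into the same 16-byte alloca and one
  // access is known to cover all 16 bytes, then any nonzero-size access
  // through the other pointer must intersect it, hence at least PartialAlias.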
  if (DL && O1 == O2)
    if ((V1Size != MemoryLocation::UnknownSize &&
         isObjectSize(O1, V1Size, *DL, *TLI)) ||
        (V2Size != MemoryLocation::UnknownSize &&
         isObjectSize(O2, V2Size, *DL, *TLI)))
      return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
      AliasAnalysis::alias(MemoryLocation(V1, V1Size, V1AAInfo),
                           MemoryLocation(V2, V2Size, V2AAInfo));
  return AliasCache[Locs] = Result;
}

bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V,
                                                       const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Use dominance or loop info if available.
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
      return false;

  return true;
}

/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
void BasicAliasAnalysis::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].Extension != Extension)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = {V, Extension, -Scale};
      Dest.push_back(Entry);
    }
  }
}