//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <algorithm>
using namespace llvm;

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
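  // Illustrative sketch (hypothetical IR, names invented): a byval argument
  // whose address is stored anywhere inside the function is treated as
  // captured by the check below, e.g.
  //   store %struct.S* %byval.arg, %struct.S** @escape.slot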
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, &DL, &TLI, RoundToAlign))
    return Size;
  return AliasAnalysis::UnknownSize;
}

/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //    char *p = (char*)malloc(100)
  //    char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //    - either rewind the pointer q to the base-address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer is
  //      pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true);

  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}

/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
                         const DataLayout &DL, const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
  enum ExtensionKind {
    EK_NotExtended,
    EK_SignExt,
    EK_ZeroExt
  };

  struct VariableGEPIndex {
    const Value *V;
    ExtensionKind Extension;
    int64_t Scale;

    bool operator==(const VariableGEPIndex &Other) const {
      return V == Other.V && Extension == Other.Extension &&
             Scale == Other.Scale;
    }

    bool operator!=(const VariableGEPIndex &Other) const {
      return !operator==(Other);
    }
  };
}


/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts and return V as a Value*, and return whether we looked
/// through any sign or zero extends. The incoming Value is known to have
/// IntegerType and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                  ExtensionKind &Extension,
                                  const DataLayout &DL, unsigned Depth,
                                  AssumptionCache *AC, DominatorTree *DT) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset
    // and remove the variable.
    Offset += Const->getValue();
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default: break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &DL, 0, AC,
                               BOp, DT))
          break;
        // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                DL, Depth + 1, AC, DT);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
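  // For example (illustrative only): once we have looked through a sext and
  // committed Extension to EK_SignExt, a zext of the same chain is not
  // comparable, so below we stop rather than mix the two kinds of extension.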
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension, DL,
                                        Depth + 1, AC, DT);
    Scale = Scale.zext(OldWidth);

    // We have to sign-extend even if Extension == EK_ZeroExt as we can't
    // decompose a sign extension (i.e. zext(x - 1) != zext(x) - zext(-1)).
    Offset = Offset.sext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
/// the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth).
/// When DataLayout is not around, it just looks through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       bool &MaxLookupReached, const DataLayout *DL,
                       AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  MaxLookupReached = false;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
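    // (For example, a GEP whose source element type is an opaque struct has
    // no layout we could use to compute offsets.)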
    if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
      return V;

    // If we are lacking DataLayout information, we can't compute the offsets
    // of elements computed by GEPs. However, we can handle bitcast equivalent
    // GEPs.
    if (!DL) {
      if (!GEPOp->hasAllZeroIndices())
        return V;
      V = GEPOp->getOperand(0);
      continue;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin()+1,
         E = GEPOp->op_end(); I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += DL->getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += DL->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL->getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (DL->getPointerSizeInBits(AS) > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
                                  *DL, 0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index &&
            VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin()+i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64 - DL->getPointerSizeInBits(AS)) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
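  // Callers (see aliasGEP) treat a truncated decomposition conservatively and
  // fall back to MayAlias when this flag is set.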
  MaxLookupReached = true;
  return V;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

namespace {
  /// BasicAliasAnalysis - This is the primary alias analysis implementation.
  struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
    static char ID; // Class identification, replacement for typeinfo
    BasicAliasAnalysis() : ImmutablePass(ID) {
      initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
    }

    void initializePass() override {
      InitializeAliasAnalysis(this);
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<TargetLibraryInfo>();
    }

    AliasResult alias(const Location &LocA, const Location &LocB) override {
      assert(AliasCache.empty() && "AliasCache must be cleared after use!");
      assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
             "BasicAliasAnalysis doesn't support interprocedural queries.");
      AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags,
                                     LocB.Ptr, LocB.Size, LocB.AATags);
      // AliasCache rarely has more than 1 or 2 elements, always use
      // shrink_and_clear so it quickly returns to the inline capacity of the
      // SmallDenseMap if it ever grows larger.
      // FIXME: This should really be shrink_to_inline_capacity_and_clear().
      AliasCache.shrink_and_clear();
      VisitedPhiBBs.clear();
      return Alias;
    }

    ModRefResult getModRefInfo(ImmutableCallSite CS,
                               const Location &Loc) override;

    ModRefResult getModRefInfo(ImmutableCallSite CS1,
                               ImmutableCallSite CS2) override;

    /// pointsToConstantMemory - Chase pointers until we find a (constant
    /// global) or not.
    bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;

    /// Get the location associated with a pointer argument of a callsite.
    Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
                            ModRefResult &Mask) override;

    /// getModRefBehavior - Return the behavior when calling the given
    /// call site.
    ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;

    /// getModRefBehavior - Return the behavior when calling the given function.
    /// For use when the call site is not known.
    ModRefBehavior getModRefBehavior(const Function *F) override;

    /// getAdjustedAnalysisPointer - This method is used when a pass implements
    /// an analysis interface through multiple inheritance. If needed, it
    /// should override this to adjust the this pointer as needed for the
    /// specified pass info.
    void *getAdjustedAnalysisPointer(const void *ID) override {
      if (ID == &AliasAnalysis::ID)
        return (AliasAnalysis*)this;
      return this;
    }

  private:
    // AliasCache - Track alias queries to guard against recursion.
    typedef std::pair<Location, Location> LocPair;
    typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
    AliasCacheTy AliasCache;

    /// \brief Track phi nodes we have visited. When interpreting "Value"
    /// pointer equality as value equality we need to make sure that the
    /// "Value" is not part of a cycle. Otherwise, two uses could come from
    /// different "iterations" of a cycle and see different values for the same
    /// "Value" pointer.
    /// The following example shows the problem:
    ///   %p = phi(%alloca1, %addr2)
    ///   %l = load %ptr
    ///   %addr1 = gep %alloca2, 0, %l
    ///   %addr2 = gep %alloca2, 0, (%l + 1)
    ///      alias(%p, %addr1) -> MayAlias !
    ///   store %l, ...
    SmallPtrSet<const BasicBlock*, 8> VisitedPhiBBs;

    // Visited - Track instructions visited by pointsToConstantMemory.
    SmallPtrSet<const Value*, 16> Visited;

    /// \brief Check whether two Values can be considered equivalent.
    ///
    /// In addition to pointer equivalence of \p V1 and \p V2 this checks
    /// whether they can not be part of a cycle in the value graph by looking
    /// at all visited phi nodes and making sure that the phis cannot reach the
    /// value. We have to do this because we are looking through phi nodes
    /// (that is, we say noalias(V, phi(VA, VB)) if noalias(V, VA) and
    /// noalias(V, VB)).
    bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);

    /// \brief Dest and Src are the variable indices from two decomposed
    /// GetElementPtr instructions GEP1 and GEP2 which have common base
    /// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
    /// difference between the two pointers.
    void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                            const SmallVectorImpl<VariableGEPIndex> &Src);

    // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
    // instruction against another.
    AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                         const AAMDNodes &V1AAInfo,
                         const Value *V2, uint64_t V2Size,
                         const AAMDNodes &V2AAInfo,
                         const Value *UnderlyingV1, const Value *UnderlyingV2);

    // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
    // instruction against another.
    AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                         const AAMDNodes &PNAAInfo,
                         const Value *V2, uint64_t V2Size,
                         const AAMDNodes &V2AAInfo);

    /// aliasSelect - Disambiguate a Select instruction against another value.
    AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                            const AAMDNodes &SIAAInfo,
                            const Value *V2, uint64_t V2Size,
                            const AAMDNodes &V2AAInfo);

    AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
                           AAMDNodes V1AATag,
                           const Value *V2, uint64_t V2Size,
                           AAMDNodes V2AATag);
  };
}  // End of anonymous namespace

// Register this pass...
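// (It is registered under the name "basicaa" below, e.g. 'opt -basicaa ...'.)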
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                         "Basic Alias Analysis (stateless AA impl)",
                         false, true, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                       "Basic Alias Analysis (stateless AA impl)",
                       false, true, false)


ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
bool
BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

static bool isMemsetPattern16(const Function *MS,
                              const TargetLibraryInfo &TLI) {
  if (TLI.has(LibFunc::memset_pattern16) &&
      MS->getName() == "memset_pattern16") {
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2)))
      return true;
  }

  return false;
}

/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return DoesNotAccessMemory;

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = OnlyReadsMemory;

  // The AliasAnalysis base class has some smarts; let's use them.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}

/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return DoesNotAccessMemory;

  // For intrinsics, we can check the table.
  if (unsigned iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/IR/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = OnlyReadsMemory;

  const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
  if (isMemsetPattern16(F, TLI))
    Min = OnlyAccessesArgumentPointees;

  // Otherwise be conservative.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}

AliasAnalysis::Location
BasicAliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
                                   ModRefResult &Mask) {
  Location Loc = AliasAnalysis::getArgLocation(CS, ArgIdx, Mask);
  const TargetLibraryInfo &TLI = getAnalysis<TargetLibraryInfo>();
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II != nullptr)
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove: {
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        Loc.Size = LenCI->getZExtValue();
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Memory intrinsic location pointer not argument?");
      Mask = ArgIdx ? Ref : Mod;
      break;
    }
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start: {
      assert(ArgIdx == 1 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      Loc.Size = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      break;
    }
    case Intrinsic::invariant_end: {
      assert(ArgIdx == 2 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      Loc.Size = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
      break;
    }
    case Intrinsic::arm_neon_vld1: {
      assert(ArgIdx == 0 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
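      // (As a rough illustration, a vld1 producing <4 x i32> reads 16 bytes;
      // the store size of the intrinsic's result type bounds the access.)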
      if (DL)
        Loc.Size = DL->getTypeStoreSize(II->getType());
      break;
    }
    case Intrinsic::arm_neon_vst1: {
      assert(ArgIdx == 0 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      if (DL)
        Loc.Size = DL->getTypeStoreSize(II->getArgOperand(1)->getType());
      break;
    }
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  else if (CS.getCalledFunction() &&
           isMemsetPattern16(CS.getCalledFunction(), TLI)) {
    assert((ArgIdx == 0 || ArgIdx == 1) &&
           "Invalid argument index for memset_pattern16");
    if (ArgIdx == 1)
      Loc.Size = 16;
    else if (const ConstantInt *LenCI =
                 dyn_cast<ConstantInt>(CS.getArgument(2)))
      Loc.Size = LenCI->getZExtValue();
    assert(Loc.Ptr == CS.getArgument(ArgIdx) &&
           "memset_pattern16 location pointer not argument?");
    Mask = ArgIdx ? Ref : Mod;
  }
  // FIXME: Handle memset_pattern4 and memset_pattern8 also.

  return Loc;
}

static bool isAssumeIntrinsic(ImmutableCallSite CS) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II && II->getIntrinsicID() == Intrinsic::assume)
    return true;

  return false;
}

/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                  const Location &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the
  // pointer as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(Location(*CI), Location(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS))
    return NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS, Loc);
}

AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
                                  ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
    return NoModRef;

  // The AliasAnalysis base class has some smarts; let's use them.
  return AliasAnalysis::getModRefInfo(CS1, CS2);
}

/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
/// UnderlyingV2 is the same for V2.
///
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                             const AAMDNodes &V1AAInfo,
                             const Value *V2, uint64_t V2Size,
                             const AAMDNodes &V2AAInfo,
                             const Value *UnderlyingV1,
                             const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  bool GEP1MaxLookupReached;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // We have to get two AssumptionCaches here because GEP1 and V2 may be from
  // different functions.
  // FIXME: This really doesn't make any sense. We get a dominator tree below
  // that can only refer to a single function. But this function (aliasGEP) is
  // a method on an immutable pass that can be called when there *isn't*
  // a single function. The old pass management layer makes this "work", but
  // this isn't really a clean solution.
  AssumptionCacheTracker &ACT = getAnalysis<AssumptionCacheTracker>();
  AssumptionCache *AC1 = nullptr, *AC2 = nullptr;
  if (auto *GEP1I = dyn_cast<Instruction>(GEP1))
    AC1 = &ACT.getAssumptionCache(
        const_cast<Function &>(*GEP1I->getParent()->getParent()));
  if (auto *I2 = dyn_cast<Instruction>(V2))
    AC2 = &ACT.getAssumptionCache(
        const_cast<Function &>(*I2->getParent()->getParent()));

  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;

  // If we have two gep instructions with must-aliasing or non-aliasing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
                                       UnderlyingV2, UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
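      // (Illustrative case: if this precise, size- and AAInfo-aware base query
      // proves NoAlias and both GEPs then decompose to identical constant
      // offsets and variable indices, the derived pointers cannot alias
      // either.)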
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
                                                V1AAInfo, UnderlyingV2,
                                                V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        bool GEP2MaxLookupReached;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
            DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                                   GEP2MaxLookupReached, DL, AC2, DT);
        const Value *GEP1BasePtr =
            DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                                   GEP1MaxLookupReached, DL, AC1, DT);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          assert(!DL &&
                 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
          return MayAlias;
        }
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            GEP1VariableIndices == GEP2VariableIndices)
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, DL, AC1, DT);

    int64_t GEP2BaseOffset;
    bool GEP2MaxLookupReached;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
        DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                               GEP2MaxLookupReached, DL, AC2, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == UnknownSize && V2Size == UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
                               V2, V2Size, V2AAInfo);
    if (R != MustAlias)
      // If V2 may alias GEP base pointer, conservatively returns MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, DL, AC1, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      //          +                +
      //          | BaseOffset     |
      //          ---------------->|
      //          |-->V1Size       |-------> V2Size
      //          GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...).
      if (V1Size != UnknownSize && V2Size != UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t) GEP1VariableIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = GEP1VariableIndices[i].V;

        bool SignKnownZero, SignKnownOne;
        ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
                       0, AC1, nullptr, DT);

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
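        // (For example, an index computed as 'zext i8 %x to i64' always lies
        // in [0, 255], so its sign bit is known zero whatever %x is.)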
        bool IsZExt = GEP1VariableIndices[i].Extension == EK_ZeroExt;
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = GEP1VariableIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) ||
            (SignKnownOne && Scale < 0);
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != UnknownSize && V2Size != UnknownSize &&
        ModOffset >= V2Size && V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t) GEP1BaseOffset)
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}

static AliasAnalysis::AliasResult
MergeAliasResults(AliasAnalysis::AliasResult A, AliasAnalysis::AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasAnalysis::PartialAlias && B == AliasAnalysis::MustAlias) ||
      (B == AliasAnalysis::PartialAlias && A == AliasAnalysis::MustAlias))
    return AliasAnalysis::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasAnalysis::MayAlias;
}

/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                const AAMDNodes &SIAAInfo,
                                const Value *V2, uint64_t V2Size,
                                const AAMDNodes &V2AAInfo) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
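  // (Both possible values of the select are checked against V2 below and the
  // results merged, mirroring the PHI handling in aliasPHI.)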
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                             const AAMDNodes &PNAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(Location(PN, PNSize, PNAAInfo),
                   Location(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *PV1 = PN->getIncomingValue(i);
    if (isa<PHINode>(PV1))
      // If any of the sources is itself a PHI, return MayAlias conservatively
      // to avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes, in which case this is O(m x n) time where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo,
                                 V1Srcs[0], PNSize, PNAAInfo);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo,
                                       V, PNSize, PNAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common cases,
// such as array references.
//
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                               AAMDNodes V1AAInfo,
                               const Value *V2, uint64_t V2Size,
                               AAMDNodes V2AAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias;  // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
  const Value *O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if (DL)
    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
      return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(Location(V1, V1Size, V1AAInfo),
               Location(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
  // GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V1AAInfo,
                                     V2, V2Size, V2AAInfo);
    if (Result != MayAlias) return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of the accesses
  // is accessing the entire object, then the accesses must overlap in some
  // way.
  if (DL && O1 == O2)
    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
      return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
      AliasAnalysis::alias(Location(V1, V1Size, V1AAInfo),
                           Location(V2, V2Size, V2AAInfo));
  return AliasCache[Locs] = Result;
}

bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V,
                                                       const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Use dominance or loop info if available.
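  // (Both may be null here; isPotentiallyReachable below still answers
  // conservatively via a plain CFG walk, just with less pruning.)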
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  LoopInfo *LI = getAnalysisIfAvailable<LoopInfo>();

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
      return false;

  return true;
}

/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
void BasicAliasAnalysis::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].Extension != Extension)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
      Dest.push_back(Entry);
    }
  }
}