//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Passes.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
using namespace llvm;

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
static const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return AliasAnalysis::UnknownSize;
}

/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the
  // 1st parameter, before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/true);

  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
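
// Illustrative example (hypothetical IR, not part of the analysis): given
//   %buf = alloca [16 x i8]
// getObjectSize(%buf) is 16, so isObjectSmallerThan(%buf, 20, ...) returns
// true; a 20-byte access known to stay within a single object therefore
// cannot be an access of %buf, which aliasCheck exploits to conclude NoAlias.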

/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
                         const DataLayout &DL, const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {

// A linear transformation of a Value; this class represents
// ZExt(SExt(V, SExtBits), ZExtBits) * Scale + Offset.
struct VariableGEPIndex {

  // An opaque Value - we can't decompose this further.
  const Value *V;

  // We need to track what extensions we've done as we consider the same Value
  // with different extensions as different variables in a GEP's linear
  // expression;
  // e.g.: if V == -1, then sext(x) != zext(x).
  unsigned ZExtBits;
  unsigned SExtBits;

  int64_t Scale;

  bool operator==(const VariableGEPIndex &Other) const {
    return V == Other.V && ZExtBits == Other.ZExtBits &&
           SExtBits == Other.SExtBits && Scale == Other.Scale;
  }

  bool operator!=(const VariableGEPIndex &Other) const {
    return !operator==(Other);
  }
};
}

/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts and return V as a Value*, and return whether we looked
/// through any sign or zero extends. The incoming Value is known to have
/// IntegerType and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
static const Value *GetLinearExpression(const Value *V, APInt &Scale,
                                        APInt &Offset, unsigned &ZExtBits,
                                        unsigned &SExtBits,
                                        const DataLayout &DL, unsigned Depth,
                                        AssumptionCache *AC, DominatorTree *DT,
                                        bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the
    // variable. If we've been called recursively, the Offset bit width will
    // be greater than the constant's (the Offset's always as wide as the
    // outermost call), so we'll zext here and process any extension in the
    // isa<SExtInst> & isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {

      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process
      // sign extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X.
        // Otherwise we can't analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          break;
        // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}
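
// Worked example (hypothetical IR): for
//   %y = shl i32 %x, 2
//   %z = add i32 %y, 8
// GetLinearExpression(%z, ...) returns %x with Scale == 4 and Offset == 8,
// i.e. %z == 4*%x + 8. The shl case clears NSW/NUW, so callers will not
// assume the expression is non-wrapping.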

/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth).
/// When DataLayout is not around, it just looks through pointer casts.
///
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       bool &MaxLookupReached, const DataLayout &DL,
                       AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  MaxLookupReached = false;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject
      // does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
      return V;

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL.getTypeAllocSize(*GTI);
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned PointerSize = DL.getPointerSizeInBits(AS);
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index && VarIndices[i].ZExtBits == ZExtBits &&
            VarIndices[i].SExtBits == SExtBits) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64 - PointerSize) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  MaxLookupReached = true;
  return V;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif
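
// Worked example (hypothetical IR): for
//   %p = getelementptr [10 x i32], [10 x i32]* %base, i64 0, i64 %i
// DecomposeGEPExpression returns %base with BaseOffs == 0 and a single
// VariableGEPIndex {V = %i, ZExtBits = 0, SExtBits = 0, Scale = 4}, i.e.
// %p == %base + 4*%i on a target where i32 has an alloc size of 4 bytes.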

namespace {
/// BasicAliasAnalysis - This is the primary alias analysis implementation.
struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
  static char ID; // Class identification, replacement for typeinfo
  BasicAliasAnalysis() : ImmutablePass(ID) {
    initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  AliasResult alias(const Location &LocA, const Location &LocB) override {
    assert(AliasCache.empty() && "AliasCache must be cleared after use!");
    assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
           "BasicAliasAnalysis doesn't support interprocedural queries.");
    AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags,
                                   LocB.Ptr, LocB.Size, LocB.AATags);
    // AliasCache rarely has more than 1 or 2 elements, always use
    // shrink_and_clear so it quickly returns to the inline capacity of the
    // SmallDenseMap if it ever grows larger.
    // FIXME: This should really be shrink_to_inline_capacity_and_clear().
    AliasCache.shrink_and_clear();
    VisitedPhiBBs.clear();
    return Alias;
  }

  ModRefResult getModRefInfo(ImmutableCallSite CS,
                             const Location &Loc) override;

  ModRefResult getModRefInfo(ImmutableCallSite CS1,
                             ImmutableCallSite CS2) override;

  /// pointsToConstantMemory - Chase pointers until we find a (constant
  /// global) or not.
  bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;

  /// Get the location associated with a pointer argument of a callsite.
  Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
                          ModRefResult &Mask) override;

  /// getModRefBehavior - Return the behavior when calling the given
  /// call site.
  ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;

  /// getModRefBehavior - Return the behavior when calling the given function.
  /// For use when the call site is not known.
  ModRefBehavior getModRefBehavior(const Function *F) override;

  /// getAdjustedAnalysisPointer - This method is used when a pass implements
  /// an analysis interface through multiple inheritance. If needed, it
  /// should override this to adjust the this pointer as needed for the
  /// specified pass info.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &AliasAnalysis::ID)
      return (AliasAnalysis *)this;
    return this;
  }

private:
  // AliasCache - Track alias queries to guard against recursion.
  typedef std::pair<Location, Location> LocPair;
  typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
  AliasCacheTy AliasCache;

  /// \brief Track phi nodes we have visited. When interpreting "Value"
  /// pointer equality as value equality, we need to make sure that the
  /// "Value" is not part of a cycle. Otherwise, two uses could come from
  /// different "iterations" of a cycle and see different values for the same
  /// "Value" pointer.
  /// The following example shows the problem:
  ///   %p = phi(%alloca1, %addr2)
  ///   %l = load %ptr
  ///   %addr1 = gep %alloca2, 0, %l
  ///   %addr2 = gep %alloca2, 0, (%l + 1)
  ///   alias(%p, %addr1) -> MayAlias !
  ///   store %l, ...
  SmallPtrSet<const BasicBlock *, 8> VisitedPhiBBs;

  // Visited - Track instructions visited by pointsToConstantMemory.
  SmallPtrSet<const Value *, 16> Visited;

  /// \brief Check whether two Values can be considered equivalent.
  ///
  /// In addition to pointer equivalence of \p V1 and \p V2, this checks
  /// whether they can not be part of a cycle in the value graph by looking at
  /// all visited phi nodes and making sure that the phis cannot reach the
  /// value. We have to do this because we are looking through phi nodes (that
  /// is, we say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
  bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);

  /// \brief A Heuristic for aliasGEP that searches for a constant offset
  /// between the variables.
  ///
  /// GetLinearExpression has some limitations, as generally zext(%x + 1)
  /// != zext(%x) + zext(1) if the arithmetic overflows. GetLinearExpression
  /// will therefore conservatively refuse to decompose these expressions.
  /// However, we know that, for all %x, zext(%x) != zext(%x + 1), even if
  /// the addition overflows.
  bool
  constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
                          uint64_t V1Size, uint64_t V2Size,
                          int64_t BaseOffset, const DataLayout *DL,
                          AssumptionCache *AC, DominatorTree *DT);

  /// \brief Dest and Src are the variable indices from two decomposed
  /// GetElementPtr instructions GEP1 and GEP2 which have common base
  /// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
  /// difference between the two pointers.
  void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                          const SmallVectorImpl<VariableGEPIndex> &Src);

  // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
  // instruction against another.
  AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                       const AAMDNodes &V1AAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo,
                       const Value *UnderlyingV1, const Value *UnderlyingV2);

  // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
  // instruction against another.
  AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                       const AAMDNodes &PNAAInfo,
                       const Value *V2, uint64_t V2Size,
                       const AAMDNodes &V2AAInfo);

  /// aliasSelect - Disambiguate a Select instruction against another value.
  AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                          const AAMDNodes &SIAAInfo,
                          const Value *V2, uint64_t V2Size,
                          const AAMDNodes &V2AAInfo);

  AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
                         AAMDNodes V1AATag,
                         const Value *V2, uint64_t V2Size,
                         AAMDNodes V2AATag);
};
} // End of anonymous namespace

// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS_BEGIN(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                         "Basic Alias Analysis (stateless AA impl)",
                         false, true, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_AG_PASS_END(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                       "Basic Alias Analysis (stateless AA impl)",
                       false, true, false)

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}
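
// Usage note (illustrative): this pass is typically requested by name on the
// opt command line, e.g. "opt -basicaa -gvn input.ll", or programmatically by
// adding createBasicAliasAnalysisPass() to a legacy PassManager ahead of the
// passes that query AliasAnalysis.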

/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
bool
BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

static bool isMemsetPattern16(const Function *MS,
                              const TargetLibraryInfo &TLI) {
  if (TLI.has(LibFunc::memset_pattern16) &&
      MS->getName() == "memset_pattern16") {
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2)))
      return true;
  }

  return false;
}

/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return DoesNotAccessMemory;

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = OnlyReadsMemory;

  // The AliasAnalysis base class has some smarts, let's use them.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}
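
// Note (for illustration): ModRefBehavior values are bitmasks, and the
// intersection above is a bitwise AND. UnknownModRefBehavior has all bits
// set, so ANDing it with OnlyReadsMemory yields OnlyReadsMemory; whichever
// source of information is most precise wins.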

/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return DoesNotAccessMemory;

  // For intrinsics, we can check the table.
  if (unsigned iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/IR/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = OnlyReadsMemory;

  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  if (isMemsetPattern16(F, TLI))
    Min = OnlyAccessesArgumentPointees;

  // Otherwise be conservative.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}

AliasAnalysis::Location
BasicAliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
                                   ModRefResult &Mask) {
  Location Loc = AliasAnalysis::getArgLocation(CS, ArgIdx, Mask);
  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II != nullptr)
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove: {
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        Loc.Size = LenCI->getZExtValue();
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Memory intrinsic location pointer not argument?");
      Mask = ArgIdx ? Ref : Mod;
      break;
    }
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start: {
      assert(ArgIdx == 1 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      Loc.Size = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      break;
    }
    case Intrinsic::invariant_end: {
      assert(ArgIdx == 2 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      Loc.Size = cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
      break;
    }
    case Intrinsic::arm_neon_vld1: {
      assert(ArgIdx == 0 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
      if (DL)
        Loc.Size = DL->getTypeStoreSize(II->getType());
      break;
    }
    case Intrinsic::arm_neon_vst1: {
      assert(ArgIdx == 0 && "Invalid argument index");
      assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
             "Intrinsic location pointer not argument?");
      if (DL)
        Loc.Size = DL->getTypeStoreSize(II->getArgOperand(1)->getType());
      break;
    }
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  else if (CS.getCalledFunction() &&
           isMemsetPattern16(CS.getCalledFunction(), TLI)) {
    assert((ArgIdx == 0 || ArgIdx == 1) &&
           "Invalid argument index for memset_pattern16");
    if (ArgIdx == 1)
      Loc.Size = 16;
    else if (const ConstantInt *LenCI =
                 dyn_cast<ConstantInt>(CS.getArgument(2)))
      Loc.Size = LenCI->getZExtValue();
    assert(Loc.Ptr == CS.getArgument(ArgIdx) &&
           "memset_pattern16 location pointer not argument?");
    Mask = ArgIdx ? Ref : Mod;
  }
  // FIXME: Handle memset_pattern4 and memset_pattern8 also.

  return Loc;
}

static bool isAssumeIntrinsic(ImmutableCallSite CS) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II && II->getIntrinsicID() == Intrinsic::assume)
    return true;

  return false;
}

bool BasicAliasAnalysis::doInitialization(Module &M) {
  InitializeAliasAnalysis(this, &M.getDataLayout());
  return true;
}

/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                  const Location &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the
  // pointer as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(Location(*CI), Location(Object))) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }
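
  // Illustrative example (hypothetical IR): if %obj is a non-escaping alloca
  // and the call is "call void @f(i8* nocapture %p)" where isNoAlias(%p, %obj)
  // holds, the loop above finds no argument that can alias %obj, so the call
  // provably cannot mod/ref it and we return NoModRef.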

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS))
    return NoModRef;

  // The AliasAnalysis base class has some smarts, let's use them.
  return AliasAnalysis::getModRefInfo(CS, Loc);
}

AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
                                  ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
    return NoModRef;

  // The AliasAnalysis base class has some smarts, let's use them.
  return AliasAnalysis::getModRefInfo(CS1, CS2);
}

/// \brief Provide ad-hoc rules to disambiguate accesses through two GEP
/// operators, both having the exact same pointer operand.
static AliasAnalysis::AliasResult
aliasSameBasePointerGEPs(const GEPOperator *GEP1, uint64_t V1Size,
                         const GEPOperator *GEP2, uint64_t V2Size,
                         const DataLayout &DL) {

  assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return AliasAnalysis::MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == AliasAnalysis::UnknownSize ||
      V2Size == AliasAnalysis::UnknownSize)
    return AliasAnalysis::MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices aren't constants, we can't say anything.
  // If they're identical, the other indices might also be dynamically
  // equal, so the GEPs can alias.
  if (!C1 || !C2 || C1 == C2)
    return AliasAnalysis::MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return AliasAnalysis::MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  StructType *LastIndexedStruct =
      dyn_cast<StructType>(GetElementPtrInst::getIndexedType(
          GEP1->getSourceElementType(), IntermediateIndices));

  if (!LastIndexedStruct)
    return AliasAnalysis::MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return AliasAnalysis::NoAlias;

  return AliasAnalysis::MayAlias;
}
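
// Illustrative example (hypothetical IR): for %p of type %struct.S* where
// %struct.S = type { i32, i32 }, the GEPs
//   %a = getelementptr %struct.S, %struct.S* %p, i64 %i, i32 0
//   %b = getelementptr %struct.S, %struct.S* %p, i64 %j, i32 1
// address distinct fields, so 4-byte accesses through %a and %b cannot alias
// even though %i and %j are unknown: whole struct elements either coincide
// exactly or are completely disjoint.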

bool BasicAliasAnalysis::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
    uint64_t V2Size, int64_t BaseOffset, const DataLayout *DL,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || V1Size == UnknownSize ||
      V2Size == UnknownSize || !DL)
    return false;

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V0 == %x and V0Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, *DL, 0, AC, DT, NSW, NUW);
  NSW = true, NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, *DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and
  // so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset,
        Wrapped = APInt::getMaxValue(Width) - MinDiff + APInt(Width, 1);
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
}

/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
/// UnderlyingV2 is the same for V2.
///
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                             const AAMDNodes &V1AAInfo,
                             const Value *V2, uint64_t V2Size,
                             const AAMDNodes &V2AAInfo,
                             const Value *UnderlyingV1,
                             const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  bool GEP1MaxLookupReached;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // We have to get two AssumptionCaches here because GEP1 and V2 may be from
  // different functions.
  // FIXME: This really doesn't make any sense. We get a dominator tree below
  // that can only refer to a single function. But this function (aliasGEP) is
  // a method on an immutable pass that can be called when there *isn't*
  // a single function. The old pass management layer makes this "work", but
  // this isn't really a clean solution.
  AssumptionCacheTracker &ACT = getAnalysis<AssumptionCacheTracker>();
  AssumptionCache *AC1 = nullptr, *AC2 = nullptr;
  if (auto *GEP1I = dyn_cast<Instruction>(GEP1))
    AC1 = &ACT.getAssumptionCache(
        const_cast<Function &>(*GEP1I->getParent()->getParent()));
  if (auto *I2 = dyn_cast<Instruction>(V2))
    AC2 = &ACT.getAssumptionCache(
        const_cast<Function &>(*I2->getParent()->getParent()));

  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;

  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
                                       UnderlyingV2, UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size,
                                                V1AAInfo, UnderlyingV2,
                                                V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        bool GEP2MaxLookupReached;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
            DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                                   GEP2MaxLookupReached, *DL, AC2, DT);
        const Value *GEP1BasePtr =
            DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                                   GEP1MaxLookupReached, *DL, AC1, DT);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          assert(!DL &&
                 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
          return MayAlias;
        }
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            GEP1VariableIndices == GEP2VariableIndices)
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias)
      return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    int64_t GEP2BaseOffset;
    bool GEP2MaxLookupReached;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
        DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                               GEP2MaxLookupReached, *DL, AC2, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (DL && GEP1->getPointerOperand() == GEP2->getPointerOperand()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, *DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == UnknownSize && V2Size == UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, AAMDNodes(),
                               V2, V2Size, V2AAInfo);
    if (R != MustAlias)
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, *DL, AC1, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(!DL &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
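      //
      // Worked example (illustrative): with GEP1BaseOffset == -4, GEP1 points
      // 4 bytes below V2. If V1Size == 8, the access [GEP1, GEP1+8) overlaps
      // the start of V2 and we return PartialAlias; if V1Size == 4, the
      // accesses are exactly adjacent and we return NoAlias.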
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...').
      if (V1Size != UnknownSize && V2Size != UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)GEP1VariableIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = GEP1VariableIndices[i].V;

        bool SignKnownZero, SignKnownOne;
        ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne,
                       *DL, 0, AC1, nullptr, DT);

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = GEP1VariableIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = GEP1VariableIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

    // Keep only the least significant set bit of Modulo, i.e. the largest
    // power of two that divides every scale.
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != UnknownSize && V2Size != UnknownSize &&
        ModOffset >= V2Size && V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 &&
        V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(GEP1VariableIndices, V1Size, V2Size,
                                GEP1BaseOffset, DL, AC1, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}
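
// MergeAliasResults - Combine per-operand alias results: agreement is kept
// as-is, PartialAlias absorbs MustAlias, and any other mix degrades to
// MayAlias. For example, merging MustAlias with PartialAlias yields
// PartialAlias, while merging NoAlias with MustAlias yields MayAlias.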
static AliasAnalysis::AliasResult
MergeAliasResults(AliasAnalysis::AliasResult A, AliasAnalysis::AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasAnalysis::PartialAlias && B == AliasAnalysis::MustAlias) ||
      (B == AliasAnalysis::PartialAlias && A == AliasAnalysis::MustAlias))
    return AliasAnalysis::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasAnalysis::MayAlias;
}

/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                const AAMDNodes &SIAAInfo,
                                const Value *V2, uint64_t V2Size,
                                const AAMDNodes &V2AAInfo) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                             const AAMDNodes &PNAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise as
  // well as efficient check: just check for aliases between the values on
  // corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(Location(PN, PNSize, PNAAInfo),
                   Location(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                             const AAMDNodes &PNAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise as
  // well as efficient check: just check for aliases between the values on
  // corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(Location(PN, PNSize, PNAAInfo),
                   Location(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any of the PHI's sources is itself a PHI, return MayAlias
      // conservatively to avoid compile time explosion. The worst possible
      // case is if both sides are PHI nodes, in which case this is O(m x n)
      // time where 'm' and 'n' are the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo,
                                 V1Srcs[0], PNSize, PNAAInfo);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then the result
  // is NoAlias or MustAlias respectively. Otherwise, the result is MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo,
                                       V, PNSize, PNAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}
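// Added commentary (not from the original source): for the same-block PHI
// case above, control flow selects corresponding incoming values together,
// e.g. for
//
//   %p = phi i32* [ %a, %bb1 ], [ %b, %bb2 ]
//   %q = phi i32* [ %x, %bb1 ], [ %y, %bb2 ]
//
// %p/%q can only be the pair (%a, %x) or (%b, %y) at run time, so merging
// aliasCheck over corresponding edges is sufficient; the cross pairs never
// occur. The cache entry for (%p, %q) is temporarily seeded with NoAlias so
// that recursive queries through a loop-carried cycle use the speculated
// result, and it is restored if the speculation fails.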
// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common
// cases, such as array references.
//
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                               AAMDNodes V1AAInfo,
                               const Value *V2, uint64_t V2Size,
                               AAMDNodes V2AAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth);
  const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-constant isIdentifiedObject
    // objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if (DL)
    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
      return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(Location(V1, V1Size, V1AAInfo),
               Location(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V1AAInfo,
                                     V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (DL && O1 == O2)
    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
      return AliasCache[Locs] = PartialAlias;

  AliasResult Result =
      AliasAnalysis::alias(Location(V1, V1Size, V1AAInfo),
                           Location(V2, V2Size, V2AAInfo));
  return AliasCache[Locs] = Result;
}
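// Illustrative usage sketch (hypothetical, not from the original source): a
// client pass would normally reach this logic through the generic
// AliasAnalysis interface rather than calling aliasCheck directly, roughly:
#if 0
static bool accessesAreDisjoint(AliasAnalysis &AA, LoadInst *L, StoreInst *S) {
  // Query with the two pointer operands and (assumed) 4-byte access sizes;
  // NoAlias means the accesses are provably disjoint.
  return AA.alias(AliasAnalysis::Location(L->getPointerOperand(), 4),
                  AliasAnalysis::Location(S->getPointerOperand(), 4)) ==
         AliasAnalysis::NoAlias;
}
#endif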
bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V,
                                                       const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Use dominance or loop info if available.
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  LoopInfo *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
      return false;

  return true;
}
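// Added commentary (not from the original source): the reachability check
// above guards against cross-iteration equality. In a loop such as
//
//   loop:
//     %p = phi i32* [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr i32, i32* %p, i64 1
//
// the SSA value %p denotes a different address on each iteration. Once the
// analysis has looked "through" the phi, two occurrences of %p may stand for
// values from different iterations, so pointer identity alone would be
// unsound; rejecting values reachable from a visited phi block restores
// soundness.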
/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
void BasicAliasAnalysis::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}
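// Worked example (added commentary, not from the original source): with
//
//   Dest = { (%i, Scale=4), (%j, Scale=8) }   // variable part of GEP1
//   Src  = { (%i, Scale=4), (%k, Scale=2) }   // variable part of GEP2
//
// the %i terms have equal scales and extension bits, so the Dest entry is
// erased; %j is left untouched; and %k is not found in Dest, so it is
// appended with its scale negated. The result is
//
//   Dest = { (%j, Scale=8), (%k, Scale=-2) }
//
// i.e. GEP1 - GEP2 = 8*%j - 2*%k plus the difference of constant offsets.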