//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The maximum limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions must use the same search depth;
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (ImmutableCallSite(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meaning of "object" is slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  //  Consider this example:
  //     char *p = (char*)malloc(100)
  //     char *q = p+80;
  //
  //  In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". If q is passed to isObjectSmallerThan() as the 1st
  // parameter, then before llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //    - either rewind the pointer q to the base-address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer is
  //      pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// To ensure a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32b programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // attributes, such as returning an aliasing pointer. Because some
        // analyses may assume that a nocapture pointer is not returned from a
        // special intrinsic (because the function would have to be marked with
        // the returned attribute), it is crucial to use this function so that
        // we stay in sync with CaptureTracking. Not using it may cause weird
        // miscompilations where two aliasing pointers are assumed not to
        // alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
          DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
          DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }

      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // All GEP math happens in the width of the pointer type, so we can
      // truncate the value to 64 bits as we don't currently handle pointers
      // larger than 64 bits and we would crash later.
      // TODO: Make `Scale` an APInt to avoid this problem.
      if (IndexScale.getBitWidth() > 64)
        IndexScale = IndexScale.sextOrTrunc(64);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
        adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
        adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (CS.onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements; always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack. We cannot
  // exclude byval arguments here; these belong to the caller of the current
  // function, not to the current function, and a tail callee may reference
  // them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return ModRefInfo::NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() &&
           !CS.isByValArgument(OperandNo)))
        continue;

      // The call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more
      // aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // If no operand aliases, reset the Must bit. It is added below if at least
    // one operand aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fall back to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts; let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special-case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts; let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize V1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize V2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize ObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
    return false;

  // We need the object to be an alloca or a GlobalVariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset from the
  // base and can use it. If the GEP has variable indices, we can't get the
  // exact GEP offset to identify the pointer alias, so return false in that
  // case.
  if (!DecompGEP.VarIndices.empty())
    return false;
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult
BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
                        const AAMDNodes &V1AAInfo, const Value *V2,
                        LocationSize V2Size, const AAMDNodes &V2AAInfo,
                        const Value *UnderlyingV1, const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
    DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
    DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two GEP instructions with must-aliasing or non-aliasing base
  // pointers, figure out if the indices of the GEPs tell us anything about
  // the derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for GEPs of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, MemoryLocation::UnknownSize,
                               V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done through
      // a pointer value associated with an address range of the memory access,
      // otherwise the behavior is undefined.".
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...).
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the sign bit of the variable is known to be zero, the value is
        // non-negative, regardless of whether it is treated as signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take either one.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
                                       LocationSize SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, LocationSize V2Size,
                                       const AAMDNodes &V2AAInfo,
                                       const Value *UnderV2) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then the result
  // is NoAlias or MustAlias, respectively. Otherwise, the result is MayAlias.
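  // Illustrative example (hypothetical IR, not from this file):
  //   %p = select i1 %c, i32* %a, i32* %b
  // If both %a and %b are NoAlias with V2, %p is NoAlias with V2; if both are
  // MustAlias with V2, so is %p. Any disagreement degrades to MayAlias.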
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                 SISize, SIAAInfo, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
                 UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise and
  // more efficient check: just check for aliases between the values on
  // corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value *, 4> UniqueSrc;
  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any source is itself a PHI, conservatively return MayAlias to
      // avoid a compile-time explosion. The worst possible case is when both
      // sides are PHI nodes, in which case this is O(m x n) time, where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;

    if (EnableRecPhiAnalysis)
      if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
        // Check whether the incoming value is a GEP that advances the pointer
        // result of this PHI node (e.g. in a loop). If this is the case, we
        // would recurse and always get a MayAlias. Handle this case specially
        // below.
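        // A typical shape (illustrative IR, not from this file):
        //   loop:
        //     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
        //     %p.next = getelementptr inbounds i8, i8* %p, i64 1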
        if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
            isa<ConstantInt>(PV1GEP->idx_begin())) {
          isRecursive = true;
          continue;
        }
      }

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = MemoryLocation::UnknownSize;

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
                 PNSize, PNAAInfo, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then the result
  // is NoAlias or MustAlias, respectively. Otherwise, the result is MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      LocationSize V2Size, AAMDNodes V2AAInfo,
                                      const Value *O1, const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other.

  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
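    // For example (illustrative), two distinct allocas, two distinct global
    // variables, or the results of two different noalias calls are distinct
    // identified objects and therefore cannot alias one another.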
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias non-constant identified objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if ((V1Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
      (V2Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O1, V2Size, DL, TLI)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
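  // Canonicalize the query so that a GEP, if present, ends up in V1; the PHI
  // and Select cases below perform the same canonicalization for their
  // respective instruction kinds.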
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize &&
        (isObjectSize(O1, V1Size, DL, TLI) ||
         isObjectSize(O2, V2Size, DL, TLI)))
      return AliasCache[Locs] = PartialAlias;

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
  return AliasCache[Locs] = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2, this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two decomposed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
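///
/// For example (illustrative): if Dest holds an entry {%i, Scale 4} and Src
/// also holds {%i, Scale 4}, the matching entry cancels and is removed from
/// Dest; a Src entry such as {%j, Scale 8} with no match in Dest is appended
/// to Dest with its scale negated, i.e. {%j, Scale -8}.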
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is O(N^2), but pointer indices almost never have
    // more than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices, LocationSize V1Size,
    LocationSize V2Size, int64_t BaseOffset, AssumptionCache *AC,
    DominatorTree *DT) {
  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return false;

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1), we should get V0 == %x and V0Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and
  // so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
  // values GEP1 > V2).
  // We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
                                 ACT.getAssumptionCache(F), &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(),
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}