//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search
// depth, otherwise the algorithm in aliasGEP will assert.
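// For example (illustrative), a chain of seven trivial GEPs
//   %p1 = getelementptr i8, i8* %base, i64 1
//   ...
//   %p7 = getelementptr i8, i8* %p6, i64 1
// exceeds this depth, so decomposition of %p7 stops at an intermediate
// pointer instead of reaching %base.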
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved; it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
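/// For example (illustrative), for "%a = alloca i32" the aligned object size
/// is 4 bytes, so isObjectSmallerThan(%a, 8, ...) returns true while
/// isObjectSmallerThan(%a, 4, ...) returns false.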
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// reports whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
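  // For example (illustrative), decomposing "sext i32 (add nsw i32 %x, 5)"
  // to 64 bits yields V = %x, Scale = 1, Offset = 5 with SExtBits raised by
  // 32: the nsw flag lets us distribute the sext over the add.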
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than 64. This is an issue in
/// particular for 32b programs with negative indices that rely on two's
/// complement wrap-arounds for precise alias information.
static int64_t adjustToPointerSize(int64_t Offset, unsigned PointerSize) {
  assert(PointerSize <= 64 && "Invalid PointerSize!");
  unsigned ShiftBits = 64 - PointerSize;
  return (int64_t)((uint64_t)Offset << ShiftBits) >> ShiftBits;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
    DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
    DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  Decomposed.StructOffset = 0;
  Decomposed.OtherOffset = 0;
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (auto CS = ImmutableCallSite(V))
        if (const Value *RV = CS.getReturnedArgOperand()) {
          V = RV;
          continue;
        }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject
      // does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }

      GepHasConstantOffset = false;

      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // All GEP math happens in the width of the pointer type, so we can
      // truncate the value to 64 bits as we don't currently handle pointers
      // larger than 64 bits and we would crash later.
      // TODO: Make `Scale` an APInt to avoid this problem.
      if (IndexScale.getBitWidth() > 64)
        IndexScale = IndexScale.sextOrTrunc(64);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
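      // (The remaining MaxLookup budget doubles as the cutoff on the number
      // of incoming values.)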
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (CS.doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (CS.onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (CS.onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If CS has operand bundles then aliasing attributes from the function it
  // calls do not directly apply to the CallSite. This can be made more
  // precise in the future.
  if (!CS.hasOperandBundles())
    if (const Function *F = CS.getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e. Mod only) parameter.
static bool isWriteOnlyParam(ImmutableCallSite CS, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (CS.paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttrs.cpp together with other
  // attributes.
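  // memset_pattern16(void *b, const void *pattern16, size_t len), as declared
  // in the Darwin libc, writes only through its first argument and reads the
  // 16-byte pattern through its second, which is why only argument 0 is
  // treated as written below.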
  LibFunc F;
  if (CS.getCalledFunction() && TLI.getLibFunc(*CS.getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  // Check for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(CS, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (CS.paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isIntrinsicCall(ImmutableCallSite CS, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements; always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
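  // For example (illustrative), given
  //   %a = alloca i32
  //   %r = tail call i32 @f(i32* @g)
  // the tail call is NoModRef with respect to %a, because a tail callee is
  // not permitted to access the caller's allocas.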
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return ModRefInfo::NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the
  // pointer as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(OperandNo) &&
           OperandNo < CS.getNumArgOperands() &&
           !CS.isByValArgument(OperandNo)))
        continue;

      // The call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (CS.doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more
      // aliases.
      if (CS.onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (CS.doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // If no operand aliased, reset the Must bit. It is added back below if at
    // least one operand aliases and all aliasing operands are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the CallSite is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  auto *Inst = CS.getInstruction();
  if (isMallocOrCallocLikeFn(Inst, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
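    // For example (illustrative), "%m = call i8* @malloc(i64 8)" neither
    // reads nor writes an unrelated location %p, so we can answer NoModRef
    // whenever %m and %p are proven NoAlias.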
    if (getBestAAResults().alias(MemoryLocation(Inst), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<MemCpyInst>(CS.getInstruction())) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(CS, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts; let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
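  // For example (illustrative), a call to @llvm.assume and any neighboring
  // call never mod/ref each other's accessed locations, so the pair
  // short-circuits to NoModRef below.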
  if (isIntrinsicCall(CS1, Intrinsic::assume) ||
      isIntrinsicCall(CS2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special-case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(CS1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts; let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
             GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1->getSExtValue() == C2->getSExtValue())
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
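  // For example (illustrative), for two GEPs of the form
  //   %f0 = getelementptr [8 x %st], [8 x %st]* %p, i64 0, i64 %i, i32 0
  //   %f1 = getelementptr [8 x %st], [8 x %st]* %p, i64 0, i64 %j, i32 1
  // every index but the last steps through arrays, and the final constant
  // indices select distinct struct fields, which the layout check below can
  // prove disjoint.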
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.
  //
  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo *random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct.foo, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct.foo, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      uint64_t ObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
    return false;

  // We need the object to be an alloca or a GlobalVariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
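  // (A variable index in the object's decomposition could be negative and
  // "rewind" the pointer below the offsets computed here, so we give up in
  // that case.)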
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  int64_t ObjectBaseOffset = DecompObject.StructOffset +
                             DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset
  // from the base and can use it. If the GEP has variable indices, we're in
  // a bit more trouble: we can't count on the constant offsets that come
  // from non-struct sources, since these can be "rewound" by a negative
  // variable offset. So use only offsets that came from structs.
  int64_t GEPBaseOffset = DecompGEP.StructOffset;
  if (DecompGEP.VarIndices.empty())
    GEPBaseOffset += DecompGEP.OtherOffset;

  return (GEPBaseOffset >= ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                                    const AAMDNodes &V1AAInfo, const Value *V2,
                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                    const Value *UnderlyingV1,
                                    const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for GEPs of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
            GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, MemoryLocation::UnknownSize,
                               V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done
      // through a pointer value associated with an address range of the
      // memory access, otherwise the behavior is undefined."
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias as well.
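  // For example (illustrative), two GEPs computing %base + 8 + 4*%i from
  // must-alias base pointers subtract to a zero constant offset with no
  // remaining variable indices, and so are MustAlias.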
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...').
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

    // Reduce Modulo to its least significant set bit, i.e. the largest power
    // of two that divides all the scales.
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
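    // For example (illustrative), with GEP1 = %base + 8 + 4*%i where %i is
    // known non-negative, and V2 = %base with V2Size <= 8, the two regions
    // cannot overlap: GEP1 always points at least 8 bytes past V2.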
    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 &&
        V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, uint64_t V2Size,
                                       const AAMDNodes &V2AAInfo,
                                       const Value *UnderV2) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then we can
  // return NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                                 SISize, SIAAInfo, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
                 UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}
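// For illustration (hypothetical IR, not from the original source): given
//   %p = select i1 %c, i8* %a, i8* %b
//   %q = select i1 %c, i8* %x, i8* %y
// the same-condition fast path above reports NoAlias for (%p, %q) whenever
// both (%a, %x) and (%b, %y) are NoAlias, because the two selects always
// take corresponding arms.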
/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise as
  // well as more efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value *, 4> UniqueSrc;
  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any of the sources is itself a PHI, return MayAlias conservatively
      // to avoid compile-time explosion. The worst possible case is if both
      // sides are PHI nodes, in which case this is O(m x n) time where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;

    if (EnableRecPhiAnalysis)
      if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
        // Check whether the incoming value is a GEP that advances the pointer
        // result of this PHI node (e.g. in a loop). If this is the case, we
        // would recurse and always get a MayAlias. Handle this case specially
        // below.
        if (PV1GEP->getPointerOperand() == PN &&
            PV1GEP->getNumIndices() == 1 &&
            isa<ConstantInt>(PV1GEP->idx_begin())) {
          isRecursive = true;
          continue;
        }
      }

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = MemoryLocation::UnknownSize;

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize, PNAAInfo, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then we can
  // return NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}
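// For illustration (hypothetical IR, not from the original source): with
// -basicaa-recphi enabled, a loop pointer such as
//   loop:
//     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr i8, i8* %p, i64 1
// has its recursive input %p.next skipped, and PNSize is widened to
// UnknownSize, so %p is compared against V2 using only the outside source
// %base.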
/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      uint64_t V2Size, AAMDNodes V2AAInfo,
                                      const Value *O1, const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndBarriers();
  V2 = V2->stripPointerCastsAndBarriers();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other.

  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }
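  // For illustration (hypothetical IR, not from the original source): two
  // distinct identified objects such as
  //   %a = alloca i32
  //   @g = global i32 0
  // can never alias, and a never-captured alloca cannot alias a pointer
  // loaded from memory or returned by a call, which is what the escape
  // checks above exploit.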
  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if ((V1Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
      (V2Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O1, V2Size, DL, TLI)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result =
        aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize &&
        (isObjectSize(O1, V1Size, DL, TLI) ||
         isObjectSize(O2, V2Size, DL, TLI)))
      return AliasCache[Locs] = PartialAlias;

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
  return AliasCache[Locs] = Result;
}
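// For illustration (behavioral sketch, not from the original source): on a
// query alias(%phi, %q), the cache insertion above provisionally records
// MayAlias for the pair. If analysing the PHI's inputs leads back to the
// same (%phi, %q) query, the nested call finds that provisional entry and
// returns immediately instead of recursing forever; the final result then
// overwrites the provisional one.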
/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
/// they can not be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}
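// For illustration (hypothetical indices, not from the original source):
// with Dest = {%i * 4} and Src = {%i * 4, %j * 2}, the matching %i entries
// cancel, and the unmatched Src entry is appended negated, leaving
// Dest = {%j * -2}, the variable part of GEP1 - GEP2.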
bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
    uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
    DominatorTree *DT) {
  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return false;

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}
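// For illustration (hypothetical client code, not from this file): a new-PM
// pass that wants these results directly could write
//   BasicAAResult &BAR = AM.getResult<BasicAA>(F);
//   if (BAR.alias(MemoryLocation(P1, Size1), MemoryLocation(P2, Size2)) ==
//       NoAlias)
//     ...
// although most clients instead go through AAManager, which stacks BasicAA
// with the other registered AA implementations.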
bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(),
                                 TLIWP.getTLI(), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(),
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}