//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This allows us to more aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C), which is
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle.
/// Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject(). Both functions need to use the same search depth;
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  // The inttoptr case works because isNonEscapingLocalObject considers all
  // means of converting or equating a pointer to an int (ptrtoint, ptr store
  // which could be followed by an integer load, ptr<->int compare) as
  // escaping, and objects located at well-known addresses via platform-specific
  // means cannot be considered non-escaping local objects.
  if (isa<IntToPtrInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed by q refers to the
  // stretch of memory of q[0:19].
  // So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  DerefBytes = CanBeFreed ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to
  // be accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                const Instruction *I) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                 const Instruction *I) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  return I != Iter.first->second &&
         !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(V)).
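/// For example (illustrative): an i8 value %v extended by 'sext ... to i32'
/// and then 'zext ... to i64' is represented as V = %v, SExtBits = 24,
/// ZExtBits = 32, so getBitWidth() returns 8 + 24 + 32 = 64.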
struct ExtendedValue {
  const Value *V;
  unsigned ZExtBits;
  unsigned SExtBits;

  explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0,
                         unsigned SExtBits = 0)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits;
  }

  ExtendedValue withValue(const Value *NewV) const {
    return ExtendedValue(NewV, ZExtBits, SExtBits);
  }

  ExtendedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0);
  }

  ExtendedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    // zext(sext(sext(NewV)))
    return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }
};

/// Represents zext(sext(V)) * Scale + Offset.
struct LinearExpression {
  ExtendedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const ExtendedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const ExtendedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const ExtendedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
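        // For example (illustrative), (X | 3) is equivalent to (X + 3) when
        // the two low bits of X are known to be zero.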
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        LLVM_FALLTHROUGH;
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset *= RHS;
        E.Scale *= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// To ensure a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// This function is capable of analyzing everything that getUnderlyingObject
/// can look through. To be able to do that getUnderlyingObject and
/// DecomposeGEPExpression must use the same search depth
/// (MaxLookupSearchDepth).
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
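  // For example (illustrative), on a 64-bit target
  //   %g = getelementptr i32, i32* %p, i64 %i
  // decomposes into Base = %p, Offset = 0, and one variable index
  // {V = %i, Scale = 4}.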
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxPointerSize, 0);
  Decomposed.HasCompileTimeConstantScale = true;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // attributes, but that return a pointer aliasing their argument.
        // Because other analyses may assume that a nocapture pointer is not
        // returned from such an intrinsic (the function would otherwise have
        // to be marked with a returned attribute), it is crucial to use this
        // function, which is kept in sync with CaptureTracking. Not using it
        // may cause weird miscompilations where two aliasing pointers are
        // assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == None)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return Decomposed;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
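      // A constant index folds into the constant offset; a variable index
      // becomes a scaled entry in VarIndices below.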
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.Offset +=
            DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
            CIdx->getValue().sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
      LinearExpression LE = GetLinearExpression(
          ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      bool Overflow;
      APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
                               .smul_ov(Scale, Overflow);
      if (Overflow) {
        LE = LinearExpression(ExtendedValue(Index, 0, SExtBits));
      } else {
        Decomposed.Offset += ScaledOffset;
        Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == LE.Val.V &&
            Decomposed.VarIndices[i].ZExtBits == LE.Val.ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == LE.Val.SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {
            LE.Val.V, LE.Val.ZExtBits, LE.Val.SExtBits, Scale, CxtI, LE.IsNSW};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
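/// E.g., with OrLocal set an alloca qualifies, while a global qualifies only
/// if it is marked 'constant'.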
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
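  // E.g., a "deopt" operand bundle may imply memory effects beyond those
  // described by the callee's attributes.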
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
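  // E.g., the destination argument of memset_pattern16 is write-only, so a
  // query on it yields ModRefInfo::Mod.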
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run.
  // However, a tail call may use an alloca with byval. Calling with byval
  // copies the contents of the alloca into argument registers or stack slots,
  // so there is no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(
          MemoryLocation::getBeforeOrAfter(*CI),
          MemoryLocation::getBeforeOrAfter(Object), AAQI);
      if (AR != AliasResult::MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more
      // aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
                                 AAQI) == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., source and destination of any given memcpy are either
  // no-alias or must-alias.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != AliasResult::NoAlias)
      rv = setRef(rv);
    if (DestAA != AliasResult::NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;
  // The same applies to deoptimize which is essentially a guard(false).
  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Return true if we know V to be the base address of the corresponding
/// memory object. This implies that any address less than V must be out of
/// bounds for the underlying object. Note that just being isIdentifiedObject()
/// is not enough - for example, a negative offset from a noalias argument or
/// call can be inbounds w.r.t the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias = getBestAAResults().alias(
        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if index scale is not a
  // compile-time constant.
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return AliasResult::MayAlias;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "getUnderlyingObject");

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  DecompGEP1.Offset -= DecompGEP2.Offset;
  GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return getBestAAResults().alias(
        MemoryLocation(UnderlyingV1, V1Size),
        MemoryLocation(UnderlyingV2, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias = getBestAAResults().alias(
      MemoryLocation::getBeforeOrAfter(UnderlyingV1),
      MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (VLeftSize.hasValue()) {
      const uint64_t LSize = VLeftSize.getValue();
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
            (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by right pointer is nested. Save the offset in
          // cache. Note that originally offset estimated as GEP1-V2, but
          // AliasResult contains the shift that represents GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt GCD;
    bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
    bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
      APInt Scale = DecompGEP1.VarIndices[i].Scale;
      APInt ScaleForGCD = DecompGEP1.VarIndices[i].Scale;
      if (!DecompGEP1.VarIndices[i].IsNSW)
        ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
                                          Scale.countTrailingZeros());

      if (i == 0)
        GCD = ScaleForGCD.abs();
      else
        GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

      if (AllNonNegative || AllNonPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;
        const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
                          (SignKnownOne && Scale.isNonPositive());
        AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
                          (SignKnownOne && Scale.isNonNegative());
      }
    }

    // We now have accesses at two offsets from the same base:
    //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
    //  2. 0 with size V2Size
    // Using arithmetic modulo GCD, the accesses are at
    // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
    // into the range [V2Size..GCD), then we know they cannot overlap.
    APInt ModOffset = DecompGEP1.Offset.srem(GCD);
    if (ModOffset.isNegative())
      ModOffset += GCD; // We want mod, not rem.
    if (V1Size.hasValue() && V2Size.hasValue() &&
        ModOffset.uge(V2Size.getValue()) &&
        (GCD - ModOffset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    // If we know all the variables are non-negative, then the total offset is
    // also non-negative and >= DecompGEP1.Offset. We have the following
    // layout: [0, V2Size) ... [TotalOffset, TotalOffset+V1Size)
    // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
    if (AllNonNegative && V2Size.hasValue() &&
        DecompGEP1.Offset.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
    // Similarly, if the variables are non-positive, then the total offset is
    // also non-positive and <= DecompGEP1.Offset. We have the following
    // layout: [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
    // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
    if (AllNonPositive && V1Size.hasValue() &&
        (-DecompGEP1.Offset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    if (V1Size.hasValue() && V2Size.hasValue()) {
      // Try to determine whether abs(VarIndex) > 0.
      Optional<APInt> MinAbsVarIndex;
      if (DecompGEP1.VarIndices.size() == 1) {
        // VarIndex = Scale*V. If V != 0 then abs(VarIndex) >= abs(Scale).
        const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
        if (isKnownNonZero(Var.V, DL, 0, &AC, Var.CxtI, DT))
          MinAbsVarIndex = Var.Scale.abs();
      } else if (DecompGEP1.VarIndices.size() == 2) {
        // VarIndex = Scale*V0 + (-Scale)*V1.
        // If V0 != V1 then abs(VarIndex) >= abs(Scale).
        // Check that VisitedPhiBBs is empty, to avoid reasoning about
        // inequality of values across loop iterations.
        const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
        const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
        if (Var0.Scale == -Var1.Scale && Var0.ZExtBits == Var1.ZExtBits &&
            Var0.SExtBits == Var1.SExtBits && VisitedPhiBBs.empty() &&
            isKnownNonEqual(Var0.V, Var1.V, DL, &AC, /* CxtI */ nullptr, DT))
          MinAbsVarIndex = Var0.Scale.abs();
      }

      if (MinAbsVarIndex) {
        // The constant offset will have added at least +/-MinAbsVarIndex to it.
        APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
        APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
        // Check that an access at OffsetLo or lower, and an access at OffsetHi
        // or higher both do not alias.
        if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
            OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
          return AliasResult::NoAlias;
      }
    }

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                DecompGEP1.Offset, &AC, DT))
      return AliasResult::NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = getBestAAResults().alias(
          MemoryLocation(SI->getTrueValue(), SISize),
          MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias = getBestAAResults().alias(
          MemoryLocation(SI->getFalseValue(), SISize),
          MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getTrueValue(), SISize), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getFalseValue(), SISize), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      Optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = getBestAAResults().alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
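  // A typical recursive phi is a pointer induction variable (illustrative):
  //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr inbounds i8, i8* %p, i64 1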
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes. In which case, this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return AliasResult::MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiInfo then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiInfo always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    Value *OnePhi = nullptr;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1)) {
        if (OnePhi && OnePhi != PV1) {
          // To control potential compile time explosion, we choose to be
          // conservative when we have more than one Phi input. It is important
          // that we handle the single phi case as that lets us handle LCSSA
          // phi nodes and (combined with the recursive phi handling) simple
          // pointer induction variable patterns.
          return AliasResult::MayAlias;
        }
        OnePhi = PV1;
      }

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }

    if (OnePhi && UniqueSrc.size() > 1)
      // Out of an abundance of caution, allow only the trivial lcssa and
      // recursive phi cases.
      return AliasResult::MayAlias;
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations. Keep track of visited phi blocks, which will
  // be used when determining value equivalence.
  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
  auto _ = make_scope_exit([&]() {
    if (BlockInserted)
      VisitedPhiBBs.erase(PN->getParent());
  });

  // If we inserted a block into VisitedPhiBBs, alias analysis results that
  // have been cached earlier may no longer be valid. Perform recursive queries
  // with a new AAQueryInfo.
  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;

  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, and we need to conservatively return
  // MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = getBestAAResults().alias(
        MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject
    // objects.
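    // E.g. (illustrative): the address of a global variable is a Constant,
    // while an alloca or a noalias call result is a non-constant identified
    // object, so the two can never refer to the same storage.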
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects
  // aren't disjoint. We do this so that the rest of BasicAA does not have to
  // deal with accesses before the base pointer, and to improve cache
  // utilization by merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption.
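      // (A non-definitive entry is the tentative NoAlias seeded by the
      // try_emplace above for a query that is still in progress; if that
      // assumption is later disproven, every result based on it is purged
      // from the cache below.)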
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return the original
    // order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);
  Entry.NumAssumptionUses = -1;

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias)
    AAQI.AssumptionBasedResults.push_back(Locs);
  return Result;
}

AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
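  // E.g. (illustrative C): given char buf[8], an 8-byte access of buf
  // overlaps every non-empty in-bounds access of buf + i, so PartialAlias is
  // the best conservative answer when MustAlias cannot be proven.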
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value.
/// We have to do this because we are looking through phi nodes (that is, we
/// say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures
  // that the Values cannot come from different iterations of a potential
  // cycle the phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
      return false;

  return true;
}

/// Computes the symbolic difference between two decomposed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    APInt Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have
    // more than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale) {
        Dest[j].Scale -= Scale;
        Dest[j].IsNSW = false;
      } else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
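    // Worked example (illustrative): with Dest = {4*%x, 2*%y} and
    // Src = {1*%x, 3*%z}, the %x entries combine to 3*%x, %y is untouched,
    // and the unmatched %z is appended negated, giving
    // Dest - Src = {3*%x, 2*%y, -3*%z}.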
    if (!!Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits,
                                -Scale, Src[i].CxtI, Src[i].IsNSW};
      Dest.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size,
    const APInt &BaseOffset, AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale || Var0.V->getType() != Var1.V->getType())
    return false;

  // We'll strip off the extensions of Var0 and Var1 and do another round of
  // GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1), we should get %x as the linearized value with an offset
  // of 1.

  LinearExpression E0 =
      GetLinearExpression(ExtendedValue(Var0.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(ExtendedValue(Var1.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || E0.Val.ZExtBits != E1.Val.ZExtBits ||
      E0.Val.SExtBits != E1.Val.SExtBits ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5
  // is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to
  // wrapping arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and
  // for other values GEP1 > V2). We'll therefore only declare NoAlias if
  // both V1Size and V2Size can fit in the MinDiffBytes gap.
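  // Worked example (illustrative): with Scale == 4 and
  // E0.Offset - E1.Offset == 3, we get MinDiff == 3 and MinDiffBytes == 12,
  // so accesses with V1Size <= 12 and V2Size <= 12 (and BaseOffset == 0)
  // cannot overlap and the heuristic returns true (NoAlias).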
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}
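// For reference (illustrative usage, not part of the pass machinery):
// BasicAA's results can be inspected from the command line via the alias
// analysis evaluator, e.g.:
//   opt -aa-pipeline=basic-aa -passes=aa-eval \
//       -print-all-alias-modref-info -disable-output input.ll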