//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

/// SearchLimitReached / SearchTimes shows how often the limit to
/// decompose GEPs is reached. It will affect the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
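// For example, with a depth limit of 6, a chain of seven nested GEPs is only
// partially decomposed; the remaining inner GEP is conservatively treated as
// the base object.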
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  // The inttoptr case works because isNonEscapingLocalObject considers all
  // means of converting or equating a pointer to an int (ptrtoint, ptr store
  // which could be followed by an integer load, ptr<->int compare) as
  // escaping, and objects located at well-known addresses via platform-specific
  // means cannot be considered non-escaping local objects.
  if (isa<IntToPtrInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed, and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                const Instruction *I) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                 const Instruction *I) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
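  // (The object is never captured anywhere in the function, so every query
  // instruction is trivially "before" any capture.)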
  if (!Iter.first->second)
    return true;

  return I != Iter.first->second &&
         !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
           TruncBits == Other.TruncBits;
  }
};

/// Represents zext(sext(trunc(V))) * Scale + Offset.
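/// For example, "(zext i32 (%x + 1) to i64) * 4" decomposes into
/// Val = zext(%x), Scale = 4, Offset = 4, provided the add is nuw so that the
/// zext can be distributed over it.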
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    return LinearExpression(Val, Scale * Other, Offset * Other,
                            IsNSW && (Other.isOne() || MulIsNSW));
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and it is limited to the
      // case where the or is equivalent to an add (both nuw and nsw).
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        LLVM_FALLTHROUGH;
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale << ")";
  }
};
}

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (None iff expression doesn't involve any geps)
  Optional<bool> InBounds;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
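///
/// For illustration, a GEP such as
///   %gep = getelementptr inbounds i32, i32* %base, i64 %i
/// decomposes into Base = %base, Offset = 0, and a single variable index
/// (V = %i, Scale = 4).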
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxPointerSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because other analyses may assume that a nocapture pointer
        // is not returned from such a special intrinsic (because the function
        // would have to be marked with the returned attribute), it is crucial
        // to use this function, as it should stay in sync with
        // CaptureTracking. Not using it may cause weird miscompilations where
        // two aliasing pointers are assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == None)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      return Decomposed;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
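        // For example, indexing field 1 of {i32, i64} adds 8 with a typical
        // 64-bit DataLayout, where the i64 member is 8-byte aligned.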
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.Offset +=
            DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
            CIdx->getValue().sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
      unsigned TruncBits = PointerSize < Width ? Width - PointerSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize =
          DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize();
      LE = LE.mul(APInt(PointerSize, TypeSize), GEPOp->isInBounds());
      Decomposed.Offset += LE.Offset.sextOrSelf(MaxPointerSize);
      APInt Scale = LE.Scale.sextOrSelf(MaxPointerSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
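    // For example, a load from a constant global such as
    //   @str = private constant [4 x i8] c"abc\00"
    // can never observe a store, so it is safe to treat it as constant memory.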
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
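  // For example, a declaration like
  //   declare i32 @f(i32* %p) readonly
  // maps to FMRB_OnlyReadsMemory here.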
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e. Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
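///
/// For example, a call cannot clobber a non-escaping alloca unless the alloca
/// is passed to the call as a pointer argument that the callee may access.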
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(
          MemoryLocation::getBeforeOrAfter(*CI),
          MemoryLocation::getBeforeOrAfter(Object), AAQI);
      if (AR != AliasResult::MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more
      // aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset the Must bit. Add it below if at least one
    // operand aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
                                 AAQI) == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., source and destination of any given memcpy are either
  // no-alias or must-alias.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != AliasResult::NoAlias)
      rv = setRef(rv);
    if (DestAA != AliasResult::NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;
  // The same applies to deoptimize which is essentially a guard(false).
  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Return true if we know V to be the base address of the corresponding
/// memory object. This implies that any address less than V must be out of
/// bounds for the underlying object. Note that just being isIdentifiedObject()
/// is not enough - for example, a negative offset from a noalias argument or
/// call can be inbounds w.r.t. the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias = getBestAAResults().alias(
        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
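  // For example, (%base + 4*%i + 8) - (%base + 4*%i) cancels the matching
  // base and variable index, leaving a constant difference of 8.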
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size),
                                    MemoryLocation(DecompGEP2.Base, V2Size),
                                    AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias = getBestAAResults().alias(
      MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
      MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      //   +                +
      //   | BaseOffset     |
      //   ---------------->|
      //   |-->V1Size       |-------> V2Size
      //   GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (VLeftSize.hasValue()) {
      const uint64_t LSize = VLeftSize.getValue();
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
            (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by the right pointer is nested. Save the offset
          // in the cache. Note that the offset was originally estimated as
          // GEP1-V2, but AliasResult contains the shift that represents
          // GEP1+Offset=V2.
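          // For illustration: with GEP1 = V2 + 2, V1Size = 2 and V2Size = 6,
          // Off is 2, the GEP1 access [2,4) is nested inside the V2 access
          // [0,6), and the cached offset -2 satisfies GEP1 + (-2) = V2.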
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt GCD;
    ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
      const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
      const APInt &Scale = Index.Scale;
      APInt ScaleForGCD = Scale;
      if (!Index.IsNSW)
        ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
                                          Scale.countTrailingZeros());

      if (i == 0)
        GCD = ScaleForGCD.abs();
      else
        GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

      ConstantRange CR =
          computeConstantRange(Index.Val.V, true, &AC, Index.CxtI);
      KnownBits Known =
          computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
      CR = CR.intersectWith(
          ConstantRange::fromKnownBits(Known, /* Signed */ true),
          ConstantRange::Signed);

      assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
             "Bit widths are normalized to MaxPointerSize");
      OffsetRange = OffsetRange.add(
          Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth())
              .smul_fast(ConstantRange(Scale)));
    }

    // We now have accesses at two offsets from the same base:
    //   1. (...)*GCD + DecompGEP1.Offset with size V1Size
    //   2. 0 with size V2Size
    // Using arithmetic modulo GCD, the accesses are at
    // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
    // into the range [V2Size..GCD), then we know they cannot overlap.
    APInt ModOffset = DecompGEP1.Offset.srem(GCD);
    if (ModOffset.isNegative())
      ModOffset += GCD; // We want mod, not rem.
    if (V1Size.hasValue() && V2Size.hasValue() &&
        ModOffset.uge(V2Size.getValue()) &&
        (GCD - ModOffset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    if (V1Size.hasValue() && V2Size.hasValue()) {
      // Compute ranges of potentially accessed bytes for both accesses. If
      // the intersection is empty, there can be no overlap.
      unsigned BW = OffsetRange.getBitWidth();
      ConstantRange Range1 = OffsetRange.add(
          ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
      ConstantRange Range2 =
          ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
      if (Range1.intersectWith(Range2).isEmptySet())
        return AliasResult::NoAlias;
    }

    if (V1Size.hasValue() && V2Size.hasValue()) {
      // Try to determine the range of values for VarIndex such that
      // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
      Optional<APInt> MinAbsVarIndex;
      if (DecompGEP1.VarIndices.size() == 1) {
        // VarIndex = Scale*V.
        const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
        if (Var.Val.TruncBits == 0 &&
            isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
          // If V != 0 then abs(VarIndex) >= abs(Scale).
          MinAbsVarIndex = Var.Scale.abs();
        }
      } else if (DecompGEP1.VarIndices.size() == 2) {
        // VarIndex = Scale*V0 + (-Scale)*V1.
        // If V0 != V1 then abs(VarIndex) >= abs(Scale).
        // Check that VisitedPhiBBs is empty, to avoid reasoning about
        // inequality of values across loop iterations.
        const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
        const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
        if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
            Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() &&
            isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                            DT))
          MinAbsVarIndex = Var0.Scale.abs();
      }

      if (MinAbsVarIndex) {
        // The constant offset will have added at least +/-MinAbsVarIndex to it.
        APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
        APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
        // We know that Offset <= OffsetLo || Offset >= OffsetHi
        if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
            OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
          return AliasResult::NoAlias;
      }
    }

    if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
      return AliasResult::NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = getBestAAResults().alias(
          MemoryLocation(SI->getTrueValue(), SISize),
          MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias = getBestAAResults().alias(
          MemoryLocation(SI->getFalseValue(), SISize),
          MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node alias V2 with NoAlias or MustAlias, we
  // return NoAlias / MustAlias. Otherwise, we return MayAlias.
  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getTrueValue(), SISize), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getFalseValue(), SISize), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
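///
/// A common pattern handled here is a simple pointer induction variable:
///   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
///   %p.next = getelementptr inbounds i8, i8* %p, i64 1
/// where one incoming value recurses back through the phi itself.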
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      Optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = getBestAAResults().alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return AliasResult::MayAlias;
    // Add the values to V1Srcs
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we always have PhiValues.
    SmallPtrSet<Value *, 4> UniqueSrc;
    Value *OnePhi = nullptr;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1)) {
        if (OnePhi && OnePhi != PV1) {
          // To control potential compile time explosion, we choose to be
          // conservative when we have more than one Phi input. It is important
          // that we handle the single phi case as that lets us handle LCSSA
          // phi nodes and (combined with the recursive phi handling) simple
          // pointer induction variable patterns.
          return AliasResult::MayAlias;
        }
        OnePhi = PV1;
      }

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }

    if (OnePhi && UniqueSrc.size() > 1)
      // Out of an abundance of caution, allow only the trivial lcssa and
      // recursive phi cases.
      return AliasResult::MayAlias;
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  // If V1Srcs is empty then the phi has no underlying non-phi value. This
  // should only be possible in blocks unreachable from the entry block, but
  // return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations. Keep track of visited phi blocks, which will
  // be used when determining value equivalence.
  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
  auto _ = make_scope_exit([&]() {
    if (BlockInserted)
      VisitedPhiBBs.erase(PN->getParent());
  });

  // If we inserted a block into VisitedPhiBBs, alias analysis results that
  // have been cached earlier may no longer be valid. Perform recursive queries
  // with a new AAQueryInfo.
  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;

  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then the result is
  // NoAlias or MustAlias respectively; otherwise it is MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = getBestAAResults().alias(
        MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Constant pointers can't alias with non-constant identified objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;
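  // To illustrate the rule above with a hypothetical example (names invented):
  //
  //   %a = alloca i32            ; a 4-byte object
  //   %v = load i64, i64* %p     ; an 8-byte access
  //
  // An 8-byte access cannot lie entirely within a 4-byte object, so if %p
  // pointed into %a the load would be undefined behavior. We may therefore
  // assume the two locations do not alias.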
  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption.
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // The cache contains sorted {V1, V2} pairs, but we should return the
    // result for the original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // The cache contains sorted {V1, V2} pairs.
  Entry.Result.swap(Swapped);
  Entry.NumAssumptionUses = -1;

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
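  // To sketch the assumption mechanism with a hypothetical example: a query
  // over mutually referencing phis such as
  //
  //   %p = phi i8* [ %a, %entry ], [ %q, %loop ]
  //   %q = phi i8* [ %b, %entry ], [ %p, %loop ]
  //
  // eventually recurses back into alias(%p, %q) and hits the in-progress,
  // non-definitive cache entry created above, which answers NoAlias "on
  // assumption". If the root query then computes anything other than NoAlias,
  // AssumptionDisproven fires and every result cached while the assumption
  // was live is discarded via AssumptionBasedResults.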
  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias)
    AAQI.AssumptionBasedResults.push_back(Locs);
  return Result;
}

AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value.
/// We have to do this because we are looking through phi nodes (that is, we
/// say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
      return false;

  return true;
}
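// A hypothetical illustration of why the reachability check matters: in
//
//   loop:
//     %p = phi i8* [ %a, %entry ], [ %q, %loop ]
//     %q = getelementptr i8, i8* %p, i64 1
//
// once the phi's block has been visited, comparing %q against %q may really
// be comparing %q from iteration N with %q from iteration N+1, which are
// different addresses. Since %p's block can reach %q, we refuse to treat the
// two occurrences as equal.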
/// Computes the symbolic difference between two decomposed GEPs.
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP) {
  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;

      // If we found it, subtract Src's scale from the entry in Dest. If the
      // difference goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
      DestGEP.VarIndices.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const DecomposedGEP &GEP, LocationSize MaybeV1Size,
    LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      Var0.Scale != -Var1.Scale ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1), we should get E0.Val == %x and E0.Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and
  // so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
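  // Continuing the "add i3 %i, 5" example above with hypothetical numbers:
  // MinDiff = umin(5, 8 - 5) = 3, and with an element scale of, say, 4 bytes,
  // MinDiffBytes = 3 * 4 = 12, so the two indexed addresses are always at
  // least 12 bytes apart.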
  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}
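// A minimal usage sketch (hypothetical, not part of this file): under the new
// pass manager, BasicAA's result is typically reached through the analysis
// manager once the analyses it depends on have been registered, e.g.
// (simplified):
//
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] { return TargetLibraryAnalysis(); });
//   FAM.registerPass([] { return AssumptionAnalysis(); });
//   FAM.registerPass([] { return DominatorTreeAnalysis(); });
//   FAM.registerPass([] { return BasicAA(); });
//   BasicAAResult &BAA = FAM.getResult<BasicAA>(F);
//
// In practice BasicAA is consumed through the AAManager aggregation rather
// than queried directly.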