//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  // The inttoptr case works because isNonEscapingLocalObject considers all
  // means of converting or equating a pointer to an int (ptrtoint, ptr store
  // which could be followed by an integer load, ptr<->int compare) as
  // escaping, and objects located at well-known addresses via
  // platform-specific means cannot be considered non-escaping local objects.
  if (isa<IntToPtrInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object".
  // In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before the llvm::getObjectSize() is called to get the size of
  // the entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to
  // be accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                const Instruction *I) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                 const Instruction *I) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
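  // (A null entry cached in EarliestEscapes means no capturing instruction
  // was found for this object.)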
  if (!Iter.first->second)
    return true;

  return I != Iter.first->second &&
         !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  KnownBits evaluateWith(KnownBits N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) &&
           (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
           TruncBits == Other.TruncBits;
  }
};

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        LLVM_FALLTHROUGH;
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset *= RHS;
        E.Scale *= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}

/// To ensure a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale << ")";
  }
};
}

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (None iff expression doesn't involve any geps)
  Optional<bool> InBounds;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
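///
/// For example (illustrative), decomposing
///   %p1 = getelementptr inbounds i32, i32* %base, i64 %i
///   %p2 = getelementptr inbounds i32, i32* %p1, i64 2
/// starting from %p2 conceptually yields Base = %base, Offset = 8 and a
/// single variable index {V = %i, Scale = 4}, assuming i32 has an alloc size
/// of 4 bytes in the given DataLayout.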
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxPointerSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from such special intrinsics (the function would
        // have to be marked with the returned attribute), it is crucial to use
        // this function because it should be in sync with CaptureTracking. Not
        // using it may cause weird miscompilations where two aliasing pointers
        // are assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == None)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    // Don't attempt to analyze GEPs if index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      return Decomposed;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.Offset +=
            DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
            CIdx->getValue().sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0;
      unsigned TruncBits = PointerSize < Width ? Width - PointerSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      bool Overflow;
      APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize)
                               .smul_ov(Scale, Overflow);
      if (Overflow) {
        LE = LinearExpression(CastedValue(Index, 0, SExtBits, TruncBits));
      } else {
        Decomposed.Offset += ScaledOffset;
        Scale *= LE.Scale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x]  -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
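///
/// For example (illustrative), a pointer into a global declared as
/// "@g = constant [4 x i32] ..." is reported as constant memory, and with
/// OrLocal set, a pointer into an alloca of the current function is reported
/// as local, provided the walk below can prove it.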
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others.  GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call.  This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset.  This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query.  We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run.
  // However, a tail call may use an alloca with byval. Calling with byval
  // copies the contents of the alloca into argument registers or stack slots,
  // so there is no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments.  If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(
          MemoryLocation::getBeforeOrAfter(*CI),
          MemoryLocation::getBeforeOrAfter(Object), AAQI);
      if (AR != AliasResult::MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value.  This is only valid because we assume these
  // routines do not read values visible in the IR.  TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well.  Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc,
                                 AAQI) == AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., source and destination of any given memcpy are either
  // no-alias or must-alias.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != AliasResult::NoAlias)
      rv = setRef(rv);
    if (DestAA != AliasResult::NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;
  // The same applies to deoptimize which is essentially a guard(false).
  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Return true if we know V to be the base address of the corresponding memory
/// object. This implies that any address less than V must be out of bounds
/// for the underlying object.  Note that just being isIdentifiedObject() is
/// not enough - For example, a negative offset from a noalias argument or call
/// can be inbounds w.r.t the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers.  Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias = getBestAAResults().alias(
        MemoryLocation::getBeforeOrAfter(UnderlyingV1),
        MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size),
                                    MemoryLocation(DecompGEP2.Base, V2Size),
                                    AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias = getBestAAResults().alias(
      MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
      MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping.  If the difference is
  // greater, we know they do not overlap.
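  // For example (illustrative): with a constant difference of 8 bytes, an
  // 8-byte access through GEP1 still overlaps a 16-byte access through V2
  // (PartialAlias), while it cannot overlap a 4-byte access through V2
  // (NoAlias).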
  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (VLeftSize.hasValue()) {
      const uint64_t LSize = VLeftSize.getValue();
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        AliasResult AR = AliasResult::PartialAlias;
        if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
            (Off + VRightSize.getValue()).ule(LSize)) {
          // Memory referenced by right pointer is nested. Save the offset in
          // cache. Note that originally offset estimated as GEP1-V2, but
          // AliasResult contains the shift that represents GEP1+Offset=V2.
          AR.setOffset(-Off.getSExtValue());
          AR.swap(Swapped);
        }
        return AR;
      }
      return AliasResult::NoAlias;
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt GCD;
    bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
    bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
      const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
      const APInt &Scale = Index.Scale;
      APInt ScaleForGCD = Scale;
      if (!Index.IsNSW)
        ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(),
                                          Scale.countTrailingZeros());

      if (i == 0)
        GCD = ScaleForGCD.abs();
      else
        GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

      if (AllNonNegative || AllNonPositive) {
        KnownBits Known = Index.Val.evaluateWith(
            computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT));
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();
        AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
                          (SignKnownOne && Scale.isNonPositive());
        AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
                          (SignKnownOne && Scale.isNonNegative());
      }
    }

    // We now have accesses at two offsets from the same base:
    //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
    //  2. 0 with size V2Size
    // Using arithmetic modulo GCD, the accesses are at
    // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
    // into the range [V2Size..GCD), then we know they cannot overlap.
    APInt ModOffset = DecompGEP1.Offset.srem(GCD);
    if (ModOffset.isNegative())
      ModOffset += GCD; // We want mod, not rem.
    if (V1Size.hasValue() && V2Size.hasValue() &&
        ModOffset.uge(V2Size.getValue()) &&
        (GCD - ModOffset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    // If we know all the variables are non-negative, then the total offset is
    // also non-negative and >= DecompGEP1.Offset. We have the following layout:
    // [0, V2Size) ... [TotalOffset, TotalOffset+V1Size]
    // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
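    // For example (illustrative): if every variable index is known
    // non-negative, DecompGEP1.Offset is 16 and V2Size is 8, then GEP1's
    // access starts at byte 16 or later while V2's access ends before byte 8,
    // so the two cannot overlap.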
    if (AllNonNegative && V2Size.hasValue() &&
        DecompGEP1.Offset.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
    // Similarly, if the variables are non-positive, then the total offset is
    // also non-positive and <= DecompGEP1.Offset. We have the following layout:
    // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
    // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
    if (AllNonPositive && V1Size.hasValue() &&
        (-DecompGEP1.Offset).uge(V1Size.getValue()))
      return AliasResult::NoAlias;

    if (V1Size.hasValue() && V2Size.hasValue()) {
      // Try to determine the range of values for VarIndex.
      // VarIndexRange is such that:
      //    (VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex) &&
      //    VarIndexRange.contains(VarIndex)
      Optional<APInt> MinAbsVarIndex;
      Optional<ConstantRange> VarIndexRange;
      if (DecompGEP1.VarIndices.size() == 1) {
        // VarIndex = Scale*V.
        const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
        if (Var.Val.TruncBits == 0 &&
            isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
          // If V != 0 then abs(VarIndex) >= abs(Scale).
          MinAbsVarIndex = Var.Scale.abs();
        }
        ConstantRange R = Var.Val.evaluateWith(
            computeConstantRange(Var.Val.V, true, &AC, Var.CxtI));
        if (!R.isFullSet() && !R.isEmptySet())
          VarIndexRange = R.sextOrTrunc(Var.Scale.getBitWidth())
                              .smul_fast(ConstantRange(Var.Scale));
      } else if (DecompGEP1.VarIndices.size() == 2) {
        // VarIndex = Scale*V0 + (-Scale)*V1.
        // If V0 != V1 then abs(VarIndex) >= abs(Scale).
        // Check that VisitedPhiBBs is empty, to avoid reasoning about
        // inequality of values across loop iterations.
        const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
        const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
        if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
            Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() &&
            isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                            DT))
          MinAbsVarIndex = Var0.Scale.abs();
      }

      if (MinAbsVarIndex) {
        // The constant offset will have added at least +/-MinAbsVarIndex to it.
        APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
        APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
        // We know that Offset <= OffsetLo || Offset >= OffsetHi
        if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
            OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
          return AliasResult::NoAlias;
      }

      if (VarIndexRange) {
        ConstantRange OffsetRange =
            VarIndexRange->add(ConstantRange(DecompGEP1.Offset));

        // We know that Offset >= MinOffset.
        // (MinOffset >= V2Size) => (Offset >= V2Size) => NoAlias.
        if (OffsetRange.getSignedMin().sge(V2Size.getValue()))
          return AliasResult::NoAlias;

        // We know that Offset <= MaxOffset.
        // (MaxOffset <= -V1Size) => (Offset <= -V1Size) => NoAlias.
        if (OffsetRange.getSignedMax().sle(-V1Size.getValue()))
          return AliasResult::NoAlias;
      }
    }

    if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT))
      return AliasResult::NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = getBestAAResults().alias(
          MemoryLocation(SI->getTrueValue(), SISize),
          MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias = getBestAAResults().alias(
          MemoryLocation(SI->getFalseValue(), SISize),
          MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getTrueValue(), SISize), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size),
      MemoryLocation(SI->getFalseValue(), SISize), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      Optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = getBestAAResults().alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
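  // A typical case (illustrative) is a pointer induction variable:
  //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr inbounds i8, i8* %p, i64 1
  // Here %p.next recurses back to %p, so %base is the only non-phi
  // underlying value that needs to be checked against V2.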
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return AliasResult::MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    Value *OnePhi = nullptr;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1)) {
        if (OnePhi && OnePhi != PV1) {
          // To control potential compile time explosion, we choose to be
          // conservative when we have more than one Phi input. It is important
          // that we handle the single phi case as that lets us handle LCSSA
          // phi nodes and (combined with the recursive phi handling) simple
          // pointer induction variable patterns.
          return AliasResult::MayAlias;
        }
        OnePhi = PV1;
      }

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }

    if (OnePhi && UniqueSrc.size() > 1)
      // Out of an abundance of caution, allow only the trivial lcssa and
      // recursive phi cases.
      return AliasResult::MayAlias;
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations. Keep track of visited phi blocks, which will
  // be used when determining value equivalence.
  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
  auto _ = make_scope_exit([&]() {
    if (BlockInserted)
      VisitedPhiBBs.erase(PN->getParent());
  });

  // If we inserted a block into VisitedPhiBBs, alias analysis results that
  // have been cached earlier may no longer be valid. Perform recursive queries
  // with a new AAQueryInfo.
  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;

  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size), MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
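  // (MergeAliasResults never improves on MayAlias, so continuing with the
  // remaining sources below could not change the answer.)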
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, return NoAlias or
  // MustAlias respectively. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = getBestAAResults().alias(
        MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
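    // (E.g., hypothetically, a pointer into a constant global cannot be the
    // same object as a local alloca, which is a distinct non-constant
    // allocation.)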
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption.
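      // (The "assumption" here is the provisional NoAlias entry seeded by the
      // try_emplace above; it becomes definitive only once the root query
      // completes without disproving it.)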
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);
  Entry.NumAssumptionUses = -1;

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias)
    AAQI.AssumptionBasedResults.push_back(Locs);
  return Result;
}

AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
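  // For example (hypothetically), if O1 is a 16-byte alloca and one access
  // covers all 16 bytes, any other access into the same alloca must overlap
  // it, so PartialAlias is a safe refinement of MayAlias here.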
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP) {
  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
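    // (The scale is negated because Src comes from the subtrahend GEP in the
    // Dest - Src difference being computed.)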
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
      DestGEP.VarIndices.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const DecomposedGEP &GEP, LocationSize MaybeV1Size,
    LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      Var0.Scale != -Var1.Scale ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V1 == %x and V1Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and
  // so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
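  // Worked example (hypothetical numbers): with MinDiffBytes == 12,
  // GEP.Offset == 2 and V1Size == V2Size == 4, both checks below require
  // 12 >= 4 + |2|, which holds, so the accesses cannot overlap for any value
  // of the index.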
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F);
  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV);
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}