1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines the primary stateless implementation of the 10 // Alias Analysis interface that implements identities (two different 11 // globals cannot alias, etc), but does no stateful analysis. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Analysis/BasicAliasAnalysis.h" 16 #include "llvm/ADT/APInt.h" 17 #include "llvm/ADT/ScopeExit.h" 18 #include "llvm/ADT/SmallPtrSet.h" 19 #include "llvm/ADT/SmallVector.h" 20 #include "llvm/ADT/Statistic.h" 21 #include "llvm/Analysis/AliasAnalysis.h" 22 #include "llvm/Analysis/AssumptionCache.h" 23 #include "llvm/Analysis/CFG.h" 24 #include "llvm/Analysis/CaptureTracking.h" 25 #include "llvm/Analysis/InstructionSimplify.h" 26 #include "llvm/Analysis/MemoryBuiltins.h" 27 #include "llvm/Analysis/MemoryLocation.h" 28 #include "llvm/Analysis/PhiValues.h" 29 #include "llvm/Analysis/TargetLibraryInfo.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/IR/Argument.h" 32 #include "llvm/IR/Attributes.h" 33 #include "llvm/IR/Constant.h" 34 #include "llvm/IR/ConstantRange.h" 35 #include "llvm/IR/Constants.h" 36 #include "llvm/IR/DataLayout.h" 37 #include "llvm/IR/DerivedTypes.h" 38 #include "llvm/IR/Dominators.h" 39 #include "llvm/IR/Function.h" 40 #include "llvm/IR/GetElementPtrTypeIterator.h" 41 #include "llvm/IR/GlobalAlias.h" 42 #include "llvm/IR/GlobalVariable.h" 43 #include "llvm/IR/InstrTypes.h" 44 #include "llvm/IR/Instruction.h" 45 #include "llvm/IR/Instructions.h" 46 #include "llvm/IR/IntrinsicInst.h" 47 #include "llvm/IR/Intrinsics.h" 48 #include "llvm/IR/Metadata.h" 49 #include "llvm/IR/Operator.h" 50 #include "llvm/IR/Type.h" 51 #include "llvm/IR/User.h" 52 #include "llvm/IR/Value.h" 53 #include "llvm/InitializePasses.h" 54 #include "llvm/Pass.h" 55 #include "llvm/Support/Casting.h" 56 #include "llvm/Support/CommandLine.h" 57 #include "llvm/Support/Compiler.h" 58 #include "llvm/Support/KnownBits.h" 59 #include <cassert> 60 #include <cstdint> 61 #include <cstdlib> 62 #include <utility> 63 64 #define DEBUG_TYPE "basicaa" 65 66 using namespace llvm; 67 68 /// Enable analysis of recursive PHI nodes. 69 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden, 70 cl::init(true)); 71 72 /// SearchLimitReached / SearchTimes shows how often the limit of 73 /// to decompose GEPs is reached. It will affect the precision 74 /// of basic alias analysis. 75 STATISTIC(SearchLimitReached, "Number of times the limit to " 76 "decompose GEPs is reached"); 77 STATISTIC(SearchTimes, "Number of times a GEP is decomposed"); 78 79 /// Cutoff after which to stop analysing a set of phi nodes potentially involved 80 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be 81 /// careful with value equivalence. We use reachability to make sure a value 82 /// cannot be involved in a cycle. 83 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20; 84 85 // The max limit of the search depth in DecomposeGEPExpression() and 86 // getUnderlyingObject(). 
87 static const unsigned MaxLookupSearchDepth = 6; 88 89 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA, 90 FunctionAnalysisManager::Invalidator &Inv) { 91 // We don't care if this analysis itself is preserved, it has no state. But 92 // we need to check that the analyses it depends on have been. Note that we 93 // may be created without handles to some analyses and in that case don't 94 // depend on them. 95 if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) || 96 (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) || 97 (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA))) 98 return true; 99 100 // Otherwise this analysis result remains valid. 101 return false; 102 } 103 104 //===----------------------------------------------------------------------===// 105 // Useful predicates 106 //===----------------------------------------------------------------------===// 107 108 /// Returns true if the pointer is one which would have been considered an 109 /// escape by isNonEscapingLocalObject. 110 static bool isEscapeSource(const Value *V) { 111 if (isa<CallBase>(V)) 112 return true; 113 114 // The load case works because isNonEscapingLocalObject considers all 115 // stores to be escapes (it passes true for the StoreCaptures argument 116 // to PointerMayBeCaptured). 117 if (isa<LoadInst>(V)) 118 return true; 119 120 // The inttoptr case works because isNonEscapingLocalObject considers all 121 // means of converting or equating a pointer to an int (ptrtoint, ptr store 122 // which could be followed by an integer load, ptr<->int compare) as 123 // escaping, and objects located at well-known addresses via platform-specific 124 // means cannot be considered non-escaping local objects. 125 if (isa<IntToPtrInst>(V)) 126 return true; 127 128 return false; 129 } 130 131 /// Returns the size of the object specified by V or UnknownSize if unknown. 132 static uint64_t getObjectSize(const Value *V, const DataLayout &DL, 133 const TargetLibraryInfo &TLI, 134 bool NullIsValidLoc, 135 bool RoundToAlign = false) { 136 uint64_t Size; 137 ObjectSizeOpts Opts; 138 Opts.RoundToAlign = RoundToAlign; 139 Opts.NullIsUnknownSize = NullIsValidLoc; 140 if (getObjectSize(V, Size, DL, &TLI, Opts)) 141 return Size; 142 return MemoryLocation::UnknownSize; 143 } 144 145 /// Returns true if we can prove that the object specified by V is smaller than 146 /// Size. 147 static bool isObjectSmallerThan(const Value *V, uint64_t Size, 148 const DataLayout &DL, 149 const TargetLibraryInfo &TLI, 150 bool NullIsValidLoc) { 151 // Note that the meanings of the "object" are slightly different in the 152 // following contexts: 153 // c1: llvm::getObjectSize() 154 // c2: llvm.objectsize() intrinsic 155 // c3: isObjectSmallerThan() 156 // c1 and c2 share the same meaning; however, the meaning of "object" in c3 157 // refers to the "entire object". 158 // 159 // Consider this example: 160 // char *p = (char*)malloc(100) 161 // char *q = p+80; 162 // 163 // In the context of c1 and c2, the "object" pointed by q refers to the 164 // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20. 165 // 166 // However, in the context of c3, the "object" refers to the chunk of memory 167 // being allocated. So, the "object" has 100 bytes, and q points to the middle 168 // the "object". 
In case q is passed to isObjectSmallerThan() as the 1st 169 // parameter, before the llvm::getObjectSize() is called to get the size of 170 // entire object, we should: 171 // - either rewind the pointer q to the base-address of the object in 172 // question (in this case rewind to p), or 173 // - just give up. It is up to caller to make sure the pointer is pointing 174 // to the base address the object. 175 // 176 // We go for 2nd option for simplicity. 177 if (!isIdentifiedObject(V)) 178 return false; 179 180 // This function needs to use the aligned object size because we allow 181 // reads a bit past the end given sufficient alignment. 182 uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc, 183 /*RoundToAlign*/ true); 184 185 return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size; 186 } 187 188 /// Return the minimal extent from \p V to the end of the underlying object, 189 /// assuming the result is used in an aliasing query. E.g., we do use the query 190 /// location size and the fact that null pointers cannot alias here. 191 static uint64_t getMinimalExtentFrom(const Value &V, 192 const LocationSize &LocSize, 193 const DataLayout &DL, 194 bool NullIsValidLoc) { 195 // If we have dereferenceability information we know a lower bound for the 196 // extent as accesses for a lower offset would be valid. We need to exclude 197 // the "or null" part if null is a valid pointer. We can ignore frees, as an 198 // access after free would be undefined behavior. 199 bool CanBeNull, CanBeFreed; 200 uint64_t DerefBytes = 201 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed); 202 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes; 203 // If queried with a precise location size, we assume that location size to be 204 // accessed, thus valid. 205 if (LocSize.isPrecise()) 206 DerefBytes = std::max(DerefBytes, LocSize.getValue()); 207 return DerefBytes; 208 } 209 210 /// Returns true if we can prove that the object specified by V has size Size. 211 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL, 212 const TargetLibraryInfo &TLI, bool NullIsValidLoc) { 213 uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc); 214 return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size; 215 } 216 217 //===----------------------------------------------------------------------===// 218 // CaptureInfo implementations 219 //===----------------------------------------------------------------------===// 220 221 CaptureInfo::~CaptureInfo() = default; 222 223 bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object, 224 const Instruction *I) { 225 return isNonEscapingLocalObject(Object, &IsCapturedCache); 226 } 227 228 bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object, 229 const Instruction *I) { 230 if (!isIdentifiedFunctionLocal(Object)) 231 return false; 232 233 auto Iter = EarliestEscapes.insert({Object, nullptr}); 234 if (Iter.second) { 235 Instruction *EarliestCapture = FindEarliestCapture( 236 Object, *const_cast<Function *>(I->getFunction()), 237 /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT); 238 if (EarliestCapture) { 239 auto Ins = Inst2Obj.insert({EarliestCapture, {}}); 240 Ins.first->second.push_back(Object); 241 } 242 Iter.first->second = EarliestCapture; 243 } 244 245 // No capturing instruction. 
246 if (!Iter.first->second) 247 return true; 248 249 return I != Iter.first->second && 250 !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI); 251 } 252 253 void EarliestEscapeInfo::removeInstruction(Instruction *I) { 254 auto Iter = Inst2Obj.find(I); 255 if (Iter != Inst2Obj.end()) { 256 for (const Value *Obj : Iter->second) 257 EarliestEscapes.erase(Obj); 258 Inst2Obj.erase(I); 259 } 260 } 261 262 //===----------------------------------------------------------------------===// 263 // GetElementPtr Instruction Decomposition and Analysis 264 //===----------------------------------------------------------------------===// 265 266 namespace { 267 /// Represents zext(sext(trunc(V))). 268 struct CastedValue { 269 const Value *V; 270 unsigned ZExtBits = 0; 271 unsigned SExtBits = 0; 272 unsigned TruncBits = 0; 273 274 explicit CastedValue(const Value *V) : V(V) {} 275 explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits, 276 unsigned TruncBits) 277 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {} 278 279 unsigned getBitWidth() const { 280 return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits + 281 SExtBits; 282 } 283 284 CastedValue withValue(const Value *NewV) const { 285 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits); 286 } 287 288 /// Replace V with zext(NewV) 289 CastedValue withZExtOfValue(const Value *NewV) const { 290 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() - 291 NewV->getType()->getPrimitiveSizeInBits(); 292 if (ExtendBy <= TruncBits) 293 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy); 294 295 // zext(sext(zext(NewV))) == zext(zext(zext(NewV))) 296 ExtendBy -= TruncBits; 297 return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0); 298 } 299 300 /// Replace V with sext(NewV) 301 CastedValue withSExtOfValue(const Value *NewV) const { 302 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() - 303 NewV->getType()->getPrimitiveSizeInBits(); 304 if (ExtendBy <= TruncBits) 305 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy); 306 307 // zext(sext(sext(NewV))) 308 ExtendBy -= TruncBits; 309 return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0); 310 } 311 312 APInt evaluateWith(APInt N) const { 313 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() && 314 "Incompatible bit width"); 315 if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits); 316 if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits); 317 if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits); 318 return N; 319 } 320 321 ConstantRange evaluateWith(ConstantRange N) const { 322 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() && 323 "Incompatible bit width"); 324 if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits); 325 if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits); 326 if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits); 327 return N; 328 } 329 330 bool canDistributeOver(bool NUW, bool NSW) const { 331 // zext(x op<nuw> y) == zext(x) op<nuw> zext(y) 332 // sext(x op<nsw> y) == sext(x) op<nsw> sext(y) 333 // trunc(x op y) == trunc(x) op trunc(y) 334 return (!ZExtBits || NUW) && (!SExtBits || NSW); 335 } 336 337 bool hasSameCastsAs(const CastedValue &Other) const { 338 return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits && 339 TruncBits == Other.TruncBits; 340 } 341 }; 342 343 /// Represents zext(sext(trunc(V))) * Scale + Offset. 
344 struct LinearExpression { 345 CastedValue Val; 346 APInt Scale; 347 APInt Offset; 348 349 /// True if all operations in this expression are NSW. 350 bool IsNSW; 351 352 LinearExpression(const CastedValue &Val, const APInt &Scale, 353 const APInt &Offset, bool IsNSW) 354 : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {} 355 356 LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) { 357 unsigned BitWidth = Val.getBitWidth(); 358 Scale = APInt(BitWidth, 1); 359 Offset = APInt(BitWidth, 0); 360 } 361 }; 362 } 363 364 /// Analyzes the specified value as a linear expression: "A*V + B", where A and 365 /// B are constant integers. 366 static LinearExpression GetLinearExpression( 367 const CastedValue &Val, const DataLayout &DL, unsigned Depth, 368 AssumptionCache *AC, DominatorTree *DT) { 369 // Limit our recursion depth. 370 if (Depth == 6) 371 return Val; 372 373 if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V)) 374 return LinearExpression(Val, APInt(Val.getBitWidth(), 0), 375 Val.evaluateWith(Const->getValue()), true); 376 377 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) { 378 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) { 379 APInt RHS = Val.evaluateWith(RHSC->getValue()); 380 // The only non-OBO case we deal with is or, and only limited to the 381 // case where it is both nuw and nsw. 382 bool NUW = true, NSW = true; 383 if (isa<OverflowingBinaryOperator>(BOp)) { 384 NUW &= BOp->hasNoUnsignedWrap(); 385 NSW &= BOp->hasNoSignedWrap(); 386 } 387 if (!Val.canDistributeOver(NUW, NSW)) 388 return Val; 389 390 // While we can distribute over trunc, we cannot preserve nowrap flags 391 // in that case. 392 if (Val.TruncBits) 393 NUW = NSW = false; 394 395 LinearExpression E(Val); 396 switch (BOp->getOpcode()) { 397 default: 398 // We don't understand this instruction, so we can't decompose it any 399 // further. 400 return Val; 401 case Instruction::Or: 402 // X|C == X+C if all the bits in C are unset in X. Otherwise we can't 403 // analyze it. 404 if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC, 405 BOp, DT)) 406 return Val; 407 408 LLVM_FALLTHROUGH; 409 case Instruction::Add: { 410 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, 411 Depth + 1, AC, DT); 412 E.Offset += RHS; 413 E.IsNSW &= NSW; 414 break; 415 } 416 case Instruction::Sub: { 417 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, 418 Depth + 1, AC, DT); 419 E.Offset -= RHS; 420 E.IsNSW &= NSW; 421 break; 422 } 423 case Instruction::Mul: { 424 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, 425 Depth + 1, AC, DT); 426 E.Offset *= RHS; 427 E.Scale *= RHS; 428 E.IsNSW &= NSW; 429 break; 430 } 431 case Instruction::Shl: 432 // We're trying to linearize an expression of the kind: 433 // shl i8 -128, 36 434 // where the shift count exceeds the bitwidth of the type. 435 // We can't decompose this further (the expression would return 436 // a poison value). 
437 if (RHS.getLimitedValue() > Val.getBitWidth()) 438 return Val; 439 440 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, 441 Depth + 1, AC, DT); 442 E.Offset <<= RHS.getLimitedValue(); 443 E.Scale <<= RHS.getLimitedValue(); 444 E.IsNSW &= NSW; 445 break; 446 } 447 return E; 448 } 449 } 450 451 if (isa<ZExtInst>(Val.V)) 452 return GetLinearExpression( 453 Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)), 454 DL, Depth + 1, AC, DT); 455 456 if (isa<SExtInst>(Val.V)) 457 return GetLinearExpression( 458 Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)), 459 DL, Depth + 1, AC, DT); 460 461 return Val; 462 } 463 464 /// To ensure a pointer offset fits in an integer of size PointerSize 465 /// (in bits) when that size is smaller than the maximum pointer size. This is 466 /// an issue, for example, in particular for 32b pointers with negative indices 467 /// that rely on two's complement wrap-arounds for precise alias information 468 /// where the maximum pointer size is 64b. 469 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) { 470 assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!"); 471 unsigned ShiftBits = Offset.getBitWidth() - PointerSize; 472 return (Offset << ShiftBits).ashr(ShiftBits); 473 } 474 475 namespace { 476 // A linear transformation of a Value; this class represents 477 // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale. 478 struct VariableGEPIndex { 479 CastedValue Val; 480 APInt Scale; 481 482 // Context instruction to use when querying information about this index. 483 const Instruction *CxtI; 484 485 /// True if all operations in this expression are NSW. 486 bool IsNSW; 487 488 void dump() const { 489 print(dbgs()); 490 dbgs() << "\n"; 491 } 492 void print(raw_ostream &OS) const { 493 OS << "(V=" << Val.V->getName() 494 << ", zextbits=" << Val.ZExtBits 495 << ", sextbits=" << Val.SExtBits 496 << ", truncbits=" << Val.TruncBits 497 << ", scale=" << Scale << ")"; 498 } 499 }; 500 } 501 502 // Represents the internal structure of a GEP, decomposed into a base pointer, 503 // constant offsets, and variable scaled indices. 504 struct BasicAAResult::DecomposedGEP { 505 // Base pointer of the GEP 506 const Value *Base; 507 // Total constant offset from base. 508 APInt Offset; 509 // Scaled variable (non-constant) indices. 510 SmallVector<VariableGEPIndex, 4> VarIndices; 511 // Are all operations inbounds GEPs or non-indexing operations? 512 // (None iff expression doesn't involve any geps) 513 Optional<bool> InBounds; 514 515 void dump() const { 516 print(dbgs()); 517 dbgs() << "\n"; 518 } 519 void print(raw_ostream &OS) const { 520 OS << "(DecomposedGEP Base=" << Base->getName() 521 << ", Offset=" << Offset 522 << ", VarIndices=["; 523 for (size_t i = 0; i < VarIndices.size(); i++) { 524 if (i != 0) 525 OS << ", "; 526 VarIndices[i].print(OS); 527 } 528 OS << "])"; 529 } 530 }; 531 532 533 /// If V is a symbolic pointer expression, decompose it into a base pointer 534 /// with a constant offset and a number of scaled symbolic offsets. 535 /// 536 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale 537 /// in the VarIndices vector) are Value*'s that are known to be scaled by the 538 /// specified amount, but which may have other unrepresented high bits. As 539 /// such, the gep cannot necessarily be reconstructed from its decomposed form. 
540 BasicAAResult::DecomposedGEP 541 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL, 542 AssumptionCache *AC, DominatorTree *DT) { 543 // Limit recursion depth to limit compile time in crazy cases. 544 unsigned MaxLookup = MaxLookupSearchDepth; 545 SearchTimes++; 546 const Instruction *CxtI = dyn_cast<Instruction>(V); 547 548 unsigned MaxPointerSize = DL.getMaxPointerSizeInBits(); 549 DecomposedGEP Decomposed; 550 Decomposed.Offset = APInt(MaxPointerSize, 0); 551 do { 552 // See if this is a bitcast or GEP. 553 const Operator *Op = dyn_cast<Operator>(V); 554 if (!Op) { 555 // The only non-operator case we can handle are GlobalAliases. 556 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 557 if (!GA->isInterposable()) { 558 V = GA->getAliasee(); 559 continue; 560 } 561 } 562 Decomposed.Base = V; 563 return Decomposed; 564 } 565 566 if (Op->getOpcode() == Instruction::BitCast || 567 Op->getOpcode() == Instruction::AddrSpaceCast) { 568 V = Op->getOperand(0); 569 continue; 570 } 571 572 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op); 573 if (!GEPOp) { 574 if (const auto *PHI = dyn_cast<PHINode>(V)) { 575 // Look through single-arg phi nodes created by LCSSA. 576 if (PHI->getNumIncomingValues() == 1) { 577 V = PHI->getIncomingValue(0); 578 continue; 579 } 580 } else if (const auto *Call = dyn_cast<CallBase>(V)) { 581 // CaptureTracking can know about special capturing properties of some 582 // intrinsics like launder.invariant.group, that can't be expressed with 583 // the attributes, but have properties like returning aliasing pointer. 584 // Because some analysis may assume that nocaptured pointer is not 585 // returned from some special intrinsic (because function would have to 586 // be marked with returns attribute), it is crucial to use this function 587 // because it should be in sync with CaptureTracking. Not using it may 588 // cause weird miscompilations where 2 aliasing pointers are assumed to 589 // noalias. 590 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) { 591 V = RP; 592 continue; 593 } 594 } 595 596 Decomposed.Base = V; 597 return Decomposed; 598 } 599 600 // Track whether we've seen at least one in bounds gep, and if so, whether 601 // all geps parsed were in bounds. 602 if (Decomposed.InBounds == None) 603 Decomposed.InBounds = GEPOp->isInBounds(); 604 else if (!GEPOp->isInBounds()) 605 Decomposed.InBounds = false; 606 607 assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized"); 608 609 // Don't attempt to analyze GEPs if index scale is not a compile-time 610 // constant. 611 if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) { 612 Decomposed.Base = V; 613 return Decomposed; 614 } 615 616 unsigned AS = GEPOp->getPointerAddressSpace(); 617 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices. 618 gep_type_iterator GTI = gep_type_begin(GEPOp); 619 unsigned PointerSize = DL.getPointerSizeInBits(AS); 620 // Assume all GEP operands are constants until proven otherwise. 621 bool GepHasConstantOffset = true; 622 for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end(); 623 I != E; ++I, ++GTI) { 624 const Value *Index = *I; 625 // Compute the (potentially symbolic) offset in bytes for this index. 626 if (StructType *STy = GTI.getStructTypeOrNull()) { 627 // For a struct, add the member offset. 
628 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); 629 if (FieldNo == 0) 630 continue; 631 632 Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo); 633 continue; 634 } 635 636 // For an array/pointer, add the element offset, explicitly scaled. 637 if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) { 638 if (CIdx->isZero()) 639 continue; 640 Decomposed.Offset += 641 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() * 642 CIdx->getValue().sextOrTrunc(MaxPointerSize); 643 continue; 644 } 645 646 GepHasConstantOffset = false; 647 648 APInt Scale(MaxPointerSize, 649 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize()); 650 // If the integer type is smaller than the pointer size, it is implicitly 651 // sign extended to pointer size. 652 unsigned Width = Index->getType()->getIntegerBitWidth(); 653 unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0; 654 unsigned TruncBits = PointerSize < Width ? Width - PointerSize : 0; 655 LinearExpression LE = GetLinearExpression( 656 CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT); 657 658 // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale. 659 // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale. 660 Decomposed.Offset += LE.Offset.sextOrTrunc(MaxPointerSize) * Scale; 661 Scale *= LE.Scale.sextOrTrunc(MaxPointerSize); 662 663 // If we already had an occurrence of this index variable, merge this 664 // scale into it. For example, we want to handle: 665 // A[x][x] -> x*16 + x*4 -> x*20 666 // This also ensures that 'x' only appears in the index list once. 667 for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) { 668 if (Decomposed.VarIndices[i].Val.V == LE.Val.V && 669 Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) { 670 Scale += Decomposed.VarIndices[i].Scale; 671 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i); 672 break; 673 } 674 } 675 676 // Make sure that we have a scale that makes sense for this target's 677 // pointer size. 678 Scale = adjustToPointerSize(Scale, PointerSize); 679 680 if (!!Scale) { 681 VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW}; 682 Decomposed.VarIndices.push_back(Entry); 683 } 684 } 685 686 // Take care of wrap-arounds 687 if (GepHasConstantOffset) 688 Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize); 689 690 // Analyze the base pointer next. 691 V = GEPOp->getOperand(0); 692 } while (--MaxLookup); 693 694 // If the chain of expressions is too deep, just return early. 695 Decomposed.Base = V; 696 SearchLimitReached++; 697 return Decomposed; 698 } 699 700 /// Returns whether the given pointer value points to memory that is local to 701 /// the function, with global constants being considered local to all 702 /// functions. 703 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc, 704 AAQueryInfo &AAQI, bool OrLocal) { 705 assert(Visited.empty() && "Visited must be cleared after use!"); 706 707 unsigned MaxLookup = 8; 708 SmallVector<const Value *, 16> Worklist; 709 Worklist.push_back(Loc.Ptr); 710 do { 711 const Value *V = getUnderlyingObject(Worklist.pop_back_val()); 712 if (!Visited.insert(V).second) { 713 Visited.clear(); 714 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); 715 } 716 717 // An alloca instruction defines local memory. 718 if (OrLocal && isa<AllocaInst>(V)) 719 continue; 720 721 // A global constant counts as local memory for our purposes. 
722 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) { 723 // Note: this doesn't require GV to be "ODR" because it isn't legal for a 724 // global to be marked constant in some modules and non-constant in 725 // others. GV may even be a declaration, not a definition. 726 if (!GV->isConstant()) { 727 Visited.clear(); 728 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); 729 } 730 continue; 731 } 732 733 // If both select values point to local memory, then so does the select. 734 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 735 Worklist.push_back(SI->getTrueValue()); 736 Worklist.push_back(SI->getFalseValue()); 737 continue; 738 } 739 740 // If all values incoming to a phi node point to local memory, then so does 741 // the phi. 742 if (const PHINode *PN = dyn_cast<PHINode>(V)) { 743 // Don't bother inspecting phi nodes with many operands. 744 if (PN->getNumIncomingValues() > MaxLookup) { 745 Visited.clear(); 746 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); 747 } 748 append_range(Worklist, PN->incoming_values()); 749 continue; 750 } 751 752 // Otherwise be conservative. 753 Visited.clear(); 754 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); 755 } while (!Worklist.empty() && --MaxLookup); 756 757 Visited.clear(); 758 return Worklist.empty(); 759 } 760 761 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) { 762 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call); 763 return II && II->getIntrinsicID() == IID; 764 } 765 766 /// Returns the behavior when calling the given call site. 767 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) { 768 if (Call->doesNotAccessMemory()) 769 // Can't do better than this. 770 return FMRB_DoesNotAccessMemory; 771 772 FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior; 773 774 // If the callsite knows it only reads memory, don't return worse 775 // than that. 776 if (Call->onlyReadsMemory()) 777 Min = FMRB_OnlyReadsMemory; 778 else if (Call->doesNotReadMemory()) 779 Min = FMRB_OnlyWritesMemory; 780 781 if (Call->onlyAccessesArgMemory()) 782 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees); 783 else if (Call->onlyAccessesInaccessibleMemory()) 784 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem); 785 else if (Call->onlyAccessesInaccessibleMemOrArgMem()) 786 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem); 787 788 // If the call has operand bundles then aliasing attributes from the function 789 // it calls do not directly apply to the call. This can be made more precise 790 // in the future. 791 if (!Call->hasOperandBundles()) 792 if (const Function *F = Call->getCalledFunction()) 793 Min = 794 FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F)); 795 796 return Min; 797 } 798 799 /// Returns the behavior when calling the given function. For use when the call 800 /// site is not known. 801 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) { 802 // If the function declares it doesn't access memory, we can't do better. 803 if (F->doesNotAccessMemory()) 804 return FMRB_DoesNotAccessMemory; 805 806 FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior; 807 808 // If the function declares it only reads memory, go with that. 
809 if (F->onlyReadsMemory()) 810 Min = FMRB_OnlyReadsMemory; 811 else if (F->doesNotReadMemory()) 812 Min = FMRB_OnlyWritesMemory; 813 814 if (F->onlyAccessesArgMemory()) 815 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees); 816 else if (F->onlyAccessesInaccessibleMemory()) 817 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem); 818 else if (F->onlyAccessesInaccessibleMemOrArgMem()) 819 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem); 820 821 return Min; 822 } 823 824 /// Returns true if this is a writeonly (i.e Mod only) parameter. 825 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx, 826 const TargetLibraryInfo &TLI) { 827 if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly)) 828 return true; 829 830 // We can bound the aliasing properties of memset_pattern16 just as we can 831 // for memcpy/memset. This is particularly important because the 832 // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16 833 // whenever possible. 834 // FIXME Consider handling this in InferFunctionAttr.cpp together with other 835 // attributes. 836 LibFunc F; 837 if (Call->getCalledFunction() && 838 TLI.getLibFunc(*Call->getCalledFunction(), F) && 839 F == LibFunc_memset_pattern16 && TLI.has(F)) 840 if (ArgIdx == 0) 841 return true; 842 843 // TODO: memset_pattern4, memset_pattern8 844 // TODO: _chk variants 845 // TODO: strcmp, strcpy 846 847 return false; 848 } 849 850 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call, 851 unsigned ArgIdx) { 852 // Checking for known builtin intrinsics and target library functions. 853 if (isWriteOnlyParam(Call, ArgIdx, TLI)) 854 return ModRefInfo::Mod; 855 856 if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly)) 857 return ModRefInfo::Ref; 858 859 if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone)) 860 return ModRefInfo::NoModRef; 861 862 return AAResultBase::getArgModRefInfo(Call, ArgIdx); 863 } 864 865 #ifndef NDEBUG 866 static const Function *getParent(const Value *V) { 867 if (const Instruction *inst = dyn_cast<Instruction>(V)) { 868 if (!inst->getParent()) 869 return nullptr; 870 return inst->getParent()->getParent(); 871 } 872 873 if (const Argument *arg = dyn_cast<Argument>(V)) 874 return arg->getParent(); 875 876 return nullptr; 877 } 878 879 static bool notDifferentParent(const Value *O1, const Value *O2) { 880 881 const Function *F1 = getParent(O1); 882 const Function *F2 = getParent(O2); 883 884 return !F1 || !F2 || F1 == F2; 885 } 886 #endif 887 888 AliasResult BasicAAResult::alias(const MemoryLocation &LocA, 889 const MemoryLocation &LocB, 890 AAQueryInfo &AAQI) { 891 assert(notDifferentParent(LocA.Ptr, LocB.Ptr) && 892 "BasicAliasAnalysis doesn't support interprocedural queries."); 893 return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI); 894 } 895 896 /// Checks to see if the specified callsite can clobber the specified memory 897 /// object. 898 /// 899 /// Since we only look at local properties of this function, we really can't 900 /// say much about this query. We do, however, use simple "address taken" 901 /// analysis on local objects. 
902 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call, 903 const MemoryLocation &Loc, 904 AAQueryInfo &AAQI) { 905 assert(notDifferentParent(Call, Loc.Ptr) && 906 "AliasAnalysis query involving multiple functions!"); 907 908 const Value *Object = getUnderlyingObject(Loc.Ptr); 909 910 // Calls marked 'tail' cannot read or write allocas from the current frame 911 // because the current frame might be destroyed by the time they run. However, 912 // a tail call may use an alloca with byval. Calling with byval copies the 913 // contents of the alloca into argument registers or stack slots, so there is 914 // no lifetime issue. 915 if (isa<AllocaInst>(Object)) 916 if (const CallInst *CI = dyn_cast<CallInst>(Call)) 917 if (CI->isTailCall() && 918 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal)) 919 return ModRefInfo::NoModRef; 920 921 // Stack restore is able to modify unescaped dynamic allocas. Assume it may 922 // modify them even though the alloca is not escaped. 923 if (auto *AI = dyn_cast<AllocaInst>(Object)) 924 if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore)) 925 return ModRefInfo::Mod; 926 927 // If the pointer is to a locally allocated object that does not escape, 928 // then the call can not mod/ref the pointer unless the call takes the pointer 929 // as an argument, and itself doesn't capture it. 930 if (!isa<Constant>(Object) && Call != Object && 931 AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) { 932 933 // Optimistically assume that call doesn't touch Object and check this 934 // assumption in the following loop. 935 ModRefInfo Result = ModRefInfo::NoModRef; 936 bool IsMustAlias = true; 937 938 unsigned OperandNo = 0; 939 for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end(); 940 CI != CE; ++CI, ++OperandNo) { 941 // Only look at the no-capture or byval pointer arguments. If this 942 // pointer were passed to arguments that were neither of these, then it 943 // couldn't be no-capture. 944 if (!(*CI)->getType()->isPointerTy() || 945 (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() && 946 !Call->isByValArgument(OperandNo))) 947 continue; 948 949 // Call doesn't access memory through this operand, so we don't care 950 // if it aliases with Object. 951 if (Call->doesNotAccessMemory(OperandNo)) 952 continue; 953 954 // If this is a no-capture pointer argument, see if we can tell that it 955 // is impossible to alias the pointer we're checking. 956 AliasResult AR = getBestAAResults().alias( 957 MemoryLocation::getBeforeOrAfter(*CI), 958 MemoryLocation::getBeforeOrAfter(Object), AAQI); 959 if (AR != AliasResult::MustAlias) 960 IsMustAlias = false; 961 // Operand doesn't alias 'Object', continue looking for other aliases 962 if (AR == AliasResult::NoAlias) 963 continue; 964 // Operand aliases 'Object', but call doesn't modify it. Strengthen 965 // initial assumption and keep looking in case if there are more aliases. 966 if (Call->onlyReadsMemory(OperandNo)) { 967 Result = setRef(Result); 968 continue; 969 } 970 // Operand aliases 'Object' but call only writes into it. 971 if (Call->doesNotReadMemory(OperandNo)) { 972 Result = setMod(Result); 973 continue; 974 } 975 // This operand aliases 'Object' and call reads and writes into it. 976 // Setting ModRef will not yield an early return below, MustAlias is not 977 // used further. 978 Result = ModRefInfo::ModRef; 979 break; 980 } 981 982 // No operand aliases, reset Must bit. Add below if at least one aliases 983 // and all aliases found are MustAlias. 
984 if (isNoModRef(Result)) 985 IsMustAlias = false; 986 987 // Early return if we improved mod ref information 988 if (!isModAndRefSet(Result)) { 989 if (isNoModRef(Result)) 990 return ModRefInfo::NoModRef; 991 return IsMustAlias ? setMust(Result) : clearMust(Result); 992 } 993 } 994 995 // If the call is malloc/calloc like, we can assume that it doesn't 996 // modify any IR visible value. This is only valid because we assume these 997 // routines do not read values visible in the IR. TODO: Consider special 998 // casing realloc and strdup routines which access only their arguments as 999 // well. Or alternatively, replace all of this with inaccessiblememonly once 1000 // that's implemented fully. 1001 if (isMallocOrCallocLikeFn(Call, &TLI)) { 1002 // Be conservative if the accessed pointer may alias the allocation - 1003 // fallback to the generic handling below. 1004 if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc, 1005 AAQI) == AliasResult::NoAlias) 1006 return ModRefInfo::NoModRef; 1007 } 1008 1009 // The semantics of memcpy intrinsics either exactly overlap or do not 1010 // overlap, i.e., source and destination of any given memcpy are either 1011 // no-alias or must-alias. 1012 if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) { 1013 AliasResult SrcAA = 1014 getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI); 1015 AliasResult DestAA = 1016 getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI); 1017 // It's also possible for Loc to alias both src and dest, or neither. 1018 ModRefInfo rv = ModRefInfo::NoModRef; 1019 if (SrcAA != AliasResult::NoAlias) 1020 rv = setRef(rv); 1021 if (DestAA != AliasResult::NoAlias) 1022 rv = setMod(rv); 1023 return rv; 1024 } 1025 1026 // Guard intrinsics are marked as arbitrarily writing so that proper control 1027 // dependencies are maintained but they never mods any particular memory 1028 // location. 1029 // 1030 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the 1031 // heap state at the point the guard is issued needs to be consistent in case 1032 // the guard invokes the "deopt" continuation. 1033 if (isIntrinsicCall(Call, Intrinsic::experimental_guard)) 1034 return ModRefInfo::Ref; 1035 // The same applies to deoptimize which is essentially a guard(false). 1036 if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize)) 1037 return ModRefInfo::Ref; 1038 1039 // Like assumes, invariant.start intrinsics were also marked as arbitrarily 1040 // writing so that proper control dependencies are maintained but they never 1041 // mod any particular memory location visible to the IR. 1042 // *Unlike* assumes (which are now modeled as NoModRef), invariant.start 1043 // intrinsic is now modeled as reading memory. This prevents hoisting the 1044 // invariant.start intrinsic over stores. Consider: 1045 // *ptr = 40; 1046 // *ptr = 50; 1047 // invariant_start(ptr) 1048 // int val = *ptr; 1049 // print(val); 1050 // 1051 // This cannot be transformed to: 1052 // 1053 // *ptr = 40; 1054 // invariant_start(ptr) 1055 // *ptr = 50; 1056 // int val = *ptr; 1057 // print(val); 1058 // 1059 // The transformation will cause the second store to be ignored (based on 1060 // rules of invariant.start) and print 40, while the first program always 1061 // prints 50. 1062 if (isIntrinsicCall(Call, Intrinsic::invariant_start)) 1063 return ModRefInfo::Ref; 1064 1065 // The AAResultBase base class has some smarts, lets use them. 
1066 return AAResultBase::getModRefInfo(Call, Loc, AAQI); 1067 } 1068 1069 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1, 1070 const CallBase *Call2, 1071 AAQueryInfo &AAQI) { 1072 // Guard intrinsics are marked as arbitrarily writing so that proper control 1073 // dependencies are maintained but they never mods any particular memory 1074 // location. 1075 // 1076 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the 1077 // heap state at the point the guard is issued needs to be consistent in case 1078 // the guard invokes the "deopt" continuation. 1079 1080 // NB! This function is *not* commutative, so we special case two 1081 // possibilities for guard intrinsics. 1082 1083 if (isIntrinsicCall(Call1, Intrinsic::experimental_guard)) 1084 return isModSet(createModRefInfo(getModRefBehavior(Call2))) 1085 ? ModRefInfo::Ref 1086 : ModRefInfo::NoModRef; 1087 1088 if (isIntrinsicCall(Call2, Intrinsic::experimental_guard)) 1089 return isModSet(createModRefInfo(getModRefBehavior(Call1))) 1090 ? ModRefInfo::Mod 1091 : ModRefInfo::NoModRef; 1092 1093 // The AAResultBase base class has some smarts, lets use them. 1094 return AAResultBase::getModRefInfo(Call1, Call2, AAQI); 1095 } 1096 1097 /// Return true if we know V to the base address of the corresponding memory 1098 /// object. This implies that any address less than V must be out of bounds 1099 /// for the underlying object. Note that just being isIdentifiedObject() is 1100 /// not enough - For example, a negative offset from a noalias argument or call 1101 /// can be inbounds w.r.t the actual underlying object. 1102 static bool isBaseOfObject(const Value *V) { 1103 // TODO: We can handle other cases here 1104 // 1) For GC languages, arguments to functions are often required to be 1105 // base pointers. 1106 // 2) Result of allocation routines are often base pointers. Leverage TLI. 1107 return (isa<AllocaInst>(V) || isa<GlobalVariable>(V)); 1108 } 1109 1110 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against 1111 /// another pointer. 1112 /// 1113 /// We know that V1 is a GEP, but we don't know anything about V2. 1114 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for 1115 /// V2. 1116 AliasResult BasicAAResult::aliasGEP( 1117 const GEPOperator *GEP1, LocationSize V1Size, 1118 const Value *V2, LocationSize V2Size, 1119 const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) { 1120 if (!V1Size.hasValue() && !V2Size.hasValue()) { 1121 // TODO: This limitation exists for compile-time reasons. Relax it if we 1122 // can avoid exponential pathological cases. 1123 if (!isa<GEPOperator>(V2)) 1124 return AliasResult::MayAlias; 1125 1126 // If both accesses have unknown size, we can only check whether the base 1127 // objects don't alias. 1128 AliasResult BaseAlias = getBestAAResults().alias( 1129 MemoryLocation::getBeforeOrAfter(UnderlyingV1), 1130 MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI); 1131 return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias 1132 : AliasResult::MayAlias; 1133 } 1134 1135 DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT); 1136 DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT); 1137 1138 // Bail if we were not able to decompose anything. 1139 if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2) 1140 return AliasResult::MayAlias; 1141 1142 // Subtract the GEP2 pointer from the GEP1 pointer to find out their 1143 // symbolic difference. 
1144 subtractDecomposedGEPs(DecompGEP1, DecompGEP2); 1145 1146 // If an inbounds GEP would have to start from an out of bounds address 1147 // for the two to alias, then we can assume noalias. 1148 if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() && 1149 V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) && 1150 isBaseOfObject(DecompGEP2.Base)) 1151 return AliasResult::NoAlias; 1152 1153 if (isa<GEPOperator>(V2)) { 1154 // Symmetric case to above. 1155 if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() && 1156 V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) && 1157 isBaseOfObject(DecompGEP1.Base)) 1158 return AliasResult::NoAlias; 1159 } 1160 1161 // For GEPs with identical offsets, we can preserve the size and AAInfo 1162 // when performing the alias check on the underlying objects. 1163 if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty()) 1164 return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size), 1165 MemoryLocation(DecompGEP2.Base, V2Size), 1166 AAQI); 1167 1168 // Do the base pointers alias? 1169 AliasResult BaseAlias = getBestAAResults().alias( 1170 MemoryLocation::getBeforeOrAfter(DecompGEP1.Base), 1171 MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI); 1172 1173 // If we get a No or May, then return it immediately, no amount of analysis 1174 // will improve this situation. 1175 if (BaseAlias != AliasResult::MustAlias) { 1176 assert(BaseAlias == AliasResult::NoAlias || 1177 BaseAlias == AliasResult::MayAlias); 1178 return BaseAlias; 1179 } 1180 1181 // If there is a constant difference between the pointers, but the difference 1182 // is less than the size of the associated memory object, then we know 1183 // that the objects are partially overlapping. If the difference is 1184 // greater, we know they do not overlap. 1185 if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) { 1186 APInt &Off = DecompGEP1.Offset; 1187 1188 // Initialize for Off >= 0 (V2 <= GEP1) case. 1189 const Value *LeftPtr = V2; 1190 const Value *RightPtr = GEP1; 1191 LocationSize VLeftSize = V2Size; 1192 LocationSize VRightSize = V1Size; 1193 const bool Swapped = Off.isNegative(); 1194 1195 if (Swapped) { 1196 // Swap if we have the situation where: 1197 // + + 1198 // | BaseOffset | 1199 // ---------------->| 1200 // |-->V1Size |-------> V2Size 1201 // GEP1 V2 1202 std::swap(LeftPtr, RightPtr); 1203 std::swap(VLeftSize, VRightSize); 1204 Off = -Off; 1205 } 1206 1207 if (VLeftSize.hasValue()) { 1208 const uint64_t LSize = VLeftSize.getValue(); 1209 if (Off.ult(LSize)) { 1210 // Conservatively drop processing if a phi was visited and/or offset is 1211 // too big. 1212 AliasResult AR = AliasResult::PartialAlias; 1213 if (VRightSize.hasValue() && Off.ule(INT32_MAX) && 1214 (Off + VRightSize.getValue()).ule(LSize)) { 1215 // Memory referenced by right pointer is nested. Save the offset in 1216 // cache. Note that originally offset estimated as GEP1-V2, but 1217 // AliasResult contains the shift that represents GEP1+Offset=V2. 
1218 AR.setOffset(-Off.getSExtValue()); 1219 AR.swap(Swapped); 1220 } 1221 return AR; 1222 } 1223 return AliasResult::NoAlias; 1224 } 1225 } 1226 1227 if (!DecompGEP1.VarIndices.empty()) { 1228 APInt GCD; 1229 ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset); 1230 for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) { 1231 const VariableGEPIndex &Index = DecompGEP1.VarIndices[i]; 1232 const APInt &Scale = Index.Scale; 1233 APInt ScaleForGCD = Scale; 1234 if (!Index.IsNSW) 1235 ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(), 1236 Scale.countTrailingZeros()); 1237 1238 if (i == 0) 1239 GCD = ScaleForGCD.abs(); 1240 else 1241 GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs()); 1242 1243 ConstantRange CR = 1244 computeConstantRange(Index.Val.V, true, &AC, Index.CxtI); 1245 KnownBits Known = 1246 computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT); 1247 CR = CR.intersectWith( 1248 ConstantRange::fromKnownBits(Known, /* Signed */ true), 1249 ConstantRange::Signed); 1250 1251 assert(OffsetRange.getBitWidth() == Scale.getBitWidth() && 1252 "Bit widths are normalized to MaxPointerSize"); 1253 OffsetRange = OffsetRange.add( 1254 Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth()) 1255 .smul_fast(ConstantRange(Scale))); 1256 } 1257 1258 // We now have accesses at two offsets from the same base: 1259 // 1. (...)*GCD + DecompGEP1.Offset with size V1Size 1260 // 2. 0 with size V2Size 1261 // Using arithmetic modulo GCD, the accesses are at 1262 // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits 1263 // into the range [V2Size..GCD), then we know they cannot overlap. 1264 APInt ModOffset = DecompGEP1.Offset.srem(GCD); 1265 if (ModOffset.isNegative()) 1266 ModOffset += GCD; // We want mod, not rem. 1267 if (V1Size.hasValue() && V2Size.hasValue() && 1268 ModOffset.uge(V2Size.getValue()) && 1269 (GCD - ModOffset).uge(V1Size.getValue())) 1270 return AliasResult::NoAlias; 1271 1272 if (V1Size.hasValue() && V2Size.hasValue()) { 1273 // Compute ranges of potentially accessed bytes for both accesses. If the 1274 // interseciton is empty, there can be no overlap. 1275 unsigned BW = OffsetRange.getBitWidth(); 1276 ConstantRange Range1 = OffsetRange.add( 1277 ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue()))); 1278 ConstantRange Range2 = 1279 ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue())); 1280 if (Range1.intersectWith(Range2).isEmptySet()) 1281 return AliasResult::NoAlias; 1282 } 1283 1284 if (V1Size.hasValue() && V2Size.hasValue()) { 1285 // Try to determine the range of values for VarIndex such that 1286 // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex. 1287 Optional<APInt> MinAbsVarIndex; 1288 if (DecompGEP1.VarIndices.size() == 1) { 1289 // VarIndex = Scale*V. 1290 const VariableGEPIndex &Var = DecompGEP1.VarIndices[0]; 1291 if (Var.Val.TruncBits == 0 && 1292 isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) { 1293 // If V != 0 then abs(VarIndex) >= abs(Scale). 1294 MinAbsVarIndex = Var.Scale.abs(); 1295 } 1296 } else if (DecompGEP1.VarIndices.size() == 2) { 1297 // VarIndex = Scale*V0 + (-Scale)*V1. 1298 // If V0 != V1 then abs(VarIndex) >= abs(Scale). 1299 // Check that VisitedPhiBBs is empty, to avoid reasoning about 1300 // inequality of values across loop iterations. 
1301 const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0]; 1302 const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1]; 1303 if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 && 1304 Var0.Val.hasSameCastsAs(Var1.Val) && VisitedPhiBBs.empty() && 1305 isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr, 1306 DT)) 1307 MinAbsVarIndex = Var0.Scale.abs(); 1308 } 1309 1310 if (MinAbsVarIndex) { 1311 // The constant offset will have added at least +/-MinAbsVarIndex to it. 1312 APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex; 1313 APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex; 1314 // We know that Offset <= OffsetLo || Offset >= OffsetHi 1315 if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) && 1316 OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue())) 1317 return AliasResult::NoAlias; 1318 } 1319 } 1320 1321 if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT)) 1322 return AliasResult::NoAlias; 1323 } 1324 1325 // Statically, we can see that the base objects are the same, but the 1326 // pointers have dynamic offsets which we can't resolve. And none of our 1327 // little tricks above worked. 1328 return AliasResult::MayAlias; 1329 } 1330 1331 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) { 1332 // If the results agree, take it. 1333 if (A == B) 1334 return A; 1335 // A mix of PartialAlias and MustAlias is PartialAlias. 1336 if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) || 1337 (B == AliasResult::PartialAlias && A == AliasResult::MustAlias)) 1338 return AliasResult::PartialAlias; 1339 // Otherwise, we don't know anything. 1340 return AliasResult::MayAlias; 1341 } 1342 1343 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction 1344 /// against another. 1345 AliasResult 1346 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize, 1347 const Value *V2, LocationSize V2Size, 1348 AAQueryInfo &AAQI) { 1349 // If the values are Selects with the same condition, we can do a more precise 1350 // check: just check for aliases between the values on corresponding arms. 1351 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) 1352 if (SI->getCondition() == SI2->getCondition()) { 1353 AliasResult Alias = getBestAAResults().alias( 1354 MemoryLocation(SI->getTrueValue(), SISize), 1355 MemoryLocation(SI2->getTrueValue(), V2Size), AAQI); 1356 if (Alias == AliasResult::MayAlias) 1357 return AliasResult::MayAlias; 1358 AliasResult ThisAlias = getBestAAResults().alias( 1359 MemoryLocation(SI->getFalseValue(), SISize), 1360 MemoryLocation(SI2->getFalseValue(), V2Size), AAQI); 1361 return MergeAliasResults(ThisAlias, Alias); 1362 } 1363 1364 // If both arms of the Select node NoAlias or MustAlias V2, then returns 1365 // NoAlias / MustAlias. Otherwise, returns MayAlias. 1366 AliasResult Alias = getBestAAResults().alias( 1367 MemoryLocation(V2, V2Size), 1368 MemoryLocation(SI->getTrueValue(), SISize), AAQI); 1369 if (Alias == AliasResult::MayAlias) 1370 return AliasResult::MayAlias; 1371 1372 AliasResult ThisAlias = getBestAAResults().alias( 1373 MemoryLocation(V2, V2Size), 1374 MemoryLocation(SI->getFalseValue(), SISize), AAQI); 1375 return MergeAliasResults(ThisAlias, Alias); 1376 } 1377 1378 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against 1379 /// another. 
1380 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize, 1381 const Value *V2, LocationSize V2Size, 1382 AAQueryInfo &AAQI) { 1383 if (!PN->getNumIncomingValues()) 1384 return AliasResult::NoAlias; 1385 // If the values are PHIs in the same block, we can do a more precise 1386 // as well as efficient check: just check for aliases between the values 1387 // on corresponding edges. 1388 if (const PHINode *PN2 = dyn_cast<PHINode>(V2)) 1389 if (PN2->getParent() == PN->getParent()) { 1390 Optional<AliasResult> Alias; 1391 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 1392 AliasResult ThisAlias = getBestAAResults().alias( 1393 MemoryLocation(PN->getIncomingValue(i), PNSize), 1394 MemoryLocation( 1395 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size), 1396 AAQI); 1397 if (Alias) 1398 *Alias = MergeAliasResults(*Alias, ThisAlias); 1399 else 1400 Alias = ThisAlias; 1401 if (*Alias == AliasResult::MayAlias) 1402 break; 1403 } 1404 return *Alias; 1405 } 1406 1407 SmallVector<Value *, 4> V1Srcs; 1408 // If a phi operand recurses back to the phi, we can still determine NoAlias 1409 // if we don't alias the underlying objects of the other phi operands, as we 1410 // know that the recursive phi needs to be based on them in some way. 1411 bool isRecursive = false; 1412 auto CheckForRecPhi = [&](Value *PV) { 1413 if (!EnableRecPhiAnalysis) 1414 return false; 1415 if (getUnderlyingObject(PV) == PN) { 1416 isRecursive = true; 1417 return true; 1418 } 1419 return false; 1420 }; 1421 1422 if (PV) { 1423 // If we have PhiValues then use it to get the underlying phi values. 1424 const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN); 1425 // If we have more phi values than the search depth then return MayAlias 1426 // conservatively to avoid compile time explosion. The worst possible case 1427 // is if both sides are PHI nodes. In which case, this is O(m x n) time 1428 // where 'm' and 'n' are the number of PHI sources. 1429 if (PhiValueSet.size() > MaxLookupSearchDepth) 1430 return AliasResult::MayAlias; 1431 // Add the values to V1Srcs 1432 for (Value *PV1 : PhiValueSet) { 1433 if (CheckForRecPhi(PV1)) 1434 continue; 1435 V1Srcs.push_back(PV1); 1436 } 1437 } else { 1438 // If we don't have PhiInfo then just look at the operands of the phi itself 1439 // FIXME: Remove this once we can guarantee that we have PhiInfo always 1440 SmallPtrSet<Value *, 4> UniqueSrc; 1441 Value *OnePhi = nullptr; 1442 for (Value *PV1 : PN->incoming_values()) { 1443 if (isa<PHINode>(PV1)) { 1444 if (OnePhi && OnePhi != PV1) { 1445 // To control potential compile time explosion, we choose to be 1446 // conserviate when we have more than one Phi input. It is important 1447 // that we handle the single phi case as that lets us handle LCSSA 1448 // phi nodes and (combined with the recursive phi handling) simple 1449 // pointer induction variable patterns. 1450 return AliasResult::MayAlias; 1451 } 1452 OnePhi = PV1; 1453 } 1454 1455 if (CheckForRecPhi(PV1)) 1456 continue; 1457 1458 if (UniqueSrc.insert(PV1).second) 1459 V1Srcs.push_back(PV1); 1460 } 1461 1462 if (OnePhi && UniqueSrc.size() > 1) 1463 // Out of an abundance of caution, allow only the trivial lcssa and 1464 // recursive phi cases. 1465 return AliasResult::MayAlias; 1466 } 1467 1468 // If V1Srcs is empty then that means that the phi has no underlying non-phi 1469 // value. This should only be possible in blocks unreachable from the entry 1470 // block, but return MayAlias just in case. 
1471   if (V1Srcs.empty())
1472     return AliasResult::MayAlias;
1473 
1474   // If this PHI node is recursive, indicate that the pointer may be moved
1475   // across iterations. We can only prove NoAlias if different underlying
1476   // objects are involved.
1477   if (isRecursive)
1478     PNSize = LocationSize::beforeOrAfterPointer();
1479 
1480   // In the recursive alias queries below, we may compare values from two
1481   // different loop iterations. Keep track of visited phi blocks, which will
1482   // be used when determining value equivalence.
1483   bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
1484   auto _ = make_scope_exit([&]() {
1485     if (BlockInserted)
1486       VisitedPhiBBs.erase(PN->getParent());
1487   });
1488 
1489   // If we inserted a block into VisitedPhiBBs, alias analysis results that
1490   // have been cached earlier may no longer be valid. Perform recursive queries
1491   // with a new AAQueryInfo.
1492   AAQueryInfo NewAAQI = AAQI.withEmptyCache();
1493   AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;
1494 
1495   AliasResult Alias = getBestAAResults().alias(
1496       MemoryLocation(V2, V2Size),
1497       MemoryLocation(V1Srcs[0], PNSize), *UseAAQI);
1498 
1499   // Early exit if the check of the first PHI source against V2 is MayAlias.
1500   // Other results are not possible.
1501   if (Alias == AliasResult::MayAlias)
1502     return AliasResult::MayAlias;
1503   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1504   // remain valid for all elements, so we conservatively return MayAlias.
1505   if (isRecursive && Alias != AliasResult::NoAlias)
1506     return AliasResult::MayAlias;
1507 
1508   // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1509   // NoAlias / MustAlias. Otherwise, returns MayAlias.
1510   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1511     Value *V = V1Srcs[i];
1512 
1513     AliasResult ThisAlias = getBestAAResults().alias(
1514         MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI);
1515     Alias = MergeAliasResults(ThisAlias, Alias);
1516     if (Alias == AliasResult::MayAlias)
1517       break;
1518   }
1519 
1520   return Alias;
1521 }
1522 
1523 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1524 /// array references.
1525 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1526                                       const Value *V2, LocationSize V2Size,
1527                                       AAQueryInfo &AAQI) {
1528   // If either of the memory references is empty, it doesn't matter what the
1529   // pointer values are.
1530   if (V1Size.isZero() || V2Size.isZero())
1531     return AliasResult::NoAlias;
1532 
1533   // Strip off any casts if they exist.
1534   V1 = V1->stripPointerCastsForAliasAnalysis();
1535   V2 = V2->stripPointerCastsForAliasAnalysis();
1536 
1537   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1538   // value for undef that aliases nothing in the program.
1539   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1540     return AliasResult::NoAlias;
1541 
1542   // Are we checking for alias of the same value?
1543   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1544   // different iterations. We must therefore make sure that this is not the
1545   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1546   // happen by looking at the visited phi nodes and making sure they cannot
1547   // reach the value.
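// For illustration (a made-up IR sketch): the same SSA value can stand for
// different addresses on different loop iterations once phis have been looked
// through, e.g.
//   loop:
//     %p = phi ptr [ %a, %entry ], [ %p.inc, %loop ]
//     %p.inc = getelementptr i8, ptr %p, i64 1
// Comparing %p.inc from one iteration against %p.inc from the next compares
// two different addresses, so plain pointer identity is not sufficient below.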
1548   if (isValueEqualInPotentialCycles(V1, V2))
1549     return AliasResult::MustAlias;
1550 
1551   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1552     return AliasResult::NoAlias; // Scalars cannot alias each other
1553 
1554   // Figure out what objects these things are pointing to if we can.
1555   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1556   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1557 
1558   // Null values in the default address space don't point to any object, so they
1559   // don't alias any other pointer.
1560   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1561     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1562       return AliasResult::NoAlias;
1563   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1564     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1565       return AliasResult::NoAlias;
1566 
1567   if (O1 != O2) {
1568     // If V1/V2 point to two different objects, we know that we have no alias.
1569     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1570       return AliasResult::NoAlias;
1571 
1572     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1573     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1574         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1575       return AliasResult::NoAlias;
1576 
1577     // Function arguments can't alias with things that are known to be
1578     // unambiguously identified at the function level.
1579     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1580         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1581       return AliasResult::NoAlias;
1582 
1583     // If one pointer is the result of a call/invoke or load and the other is a
1584     // non-escaping local object within the same function, then we know the
1585     // object couldn't escape to a point where the call could return it.
1586     //
1587     // Note that if the pointers are in different functions, there are a
1588     // variety of complications. A call with a nocapture argument may still
1589     // temporarily store the nocapture argument's value in a temporary memory
1590     // location if that memory location doesn't escape. Or it may pass a
1591     // nocapture value to other functions as long as they don't capture it.
1592     if (isEscapeSource(O1) &&
1593         AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
1594       return AliasResult::NoAlias;
1595     if (isEscapeSource(O2) &&
1596         AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
1597       return AliasResult::NoAlias;
1598   }
1599 
1600   // If the size of one access is larger than the entire object on the other
1601   // side, then we know such behavior is undefined and can assume no alias.
1602   bool NullIsValidLocation = NullPointerIsDefined(&F);
1603   if ((isObjectSmallerThan(
1604           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1605           TLI, NullIsValidLocation)) ||
1606       (isObjectSmallerThan(
1607           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1608           TLI, NullIsValidLocation)))
1609     return AliasResult::NoAlias;
1610 
1611   // If one of the accesses may be before the accessed pointer, canonicalize this
1612   // by using unknown after-pointer sizes for both accesses. This is
1613   // equivalent, because regardless of which pointer is lower, one of them
1614   // will always come after the other, as long as the underlying objects aren't
1615   // disjoint. We do this so that the rest of BasicAA does not have to deal
1616   // with accesses before the base pointer, and to improve cache utilization by
1617   // merging equivalent states.
1618   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1619     V1Size = LocationSize::afterPointer();
1620     V2Size = LocationSize::afterPointer();
1621   }
1622 
1623   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1624   // for recursive queries. For this reason, this limit is chosen to be large
1625   // enough to be very rarely hit, while still being small enough to avoid
1626   // stack overflows.
1627   if (AAQI.Depth >= 512)
1628     return AliasResult::MayAlias;
1629 
1630   // Check the cache before climbing up use-def chains. This also terminates
1631   // otherwise infinitely recursive queries.
1632   AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size});
1633   const bool Swapped = V1 > V2;
1634   if (Swapped)
1635     std::swap(Locs.first, Locs.second);
1636   const auto &Pair = AAQI.AliasCache.try_emplace(
1637       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1638   if (!Pair.second) {
1639     auto &Entry = Pair.first->second;
1640     if (!Entry.isDefinitive()) {
1641       // Remember that we used an assumption.
1642       ++Entry.NumAssumptionUses;
1643       ++AAQI.NumAssumptionUses;
1644     }
1645     // Cache contains sorted {V1,V2} pairs but we should return original order.
1646     auto Result = Entry.Result;
1647     Result.swap(Swapped);
1648     return Result;
1649   }
1650 
1651   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1652   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1653   AliasResult Result =
1654       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1655 
1656   auto It = AAQI.AliasCache.find(Locs);
1657   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1658   auto &Entry = It->second;
1659 
1660   // Check whether a NoAlias assumption has been used, but disproven.
1661   bool AssumptionDisproven =
1662       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1663   if (AssumptionDisproven)
1664     Result = AliasResult::MayAlias;
1665 
1666   // This is a definitive result now, when considered as a root query.
1667   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1668   Entry.Result = Result;
1669   // Cache contains sorted {V1,V2} pairs.
1670   Entry.Result.swap(Swapped);
1671   Entry.NumAssumptionUses = -1;
1672 
1673   // If the assumption has been disproven, remove any results that may have
1674   // been based on this assumption. Do this after the Entry updates above to
1675   // avoid iterator invalidation.
1676   if (AssumptionDisproven)
1677     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1678       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1679 
1680   // The result may still be based on assumptions higher up in the chain.
1681   // Remember it, so it can be purged from the cache later.
1682   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1683       Result != AliasResult::MayAlias)
1684     AAQI.AssumptionBasedResults.push_back(Locs);
1685   return Result;
1686 }
1687 
1688 AliasResult BasicAAResult::aliasCheckRecursive(
1689     const Value *V1, LocationSize V1Size,
1690     const Value *V2, LocationSize V2Size,
1691     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1692   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1693     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1694     if (Result != AliasResult::MayAlias)
1695       return Result;
1696   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1697     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1698     if (Result != AliasResult::MayAlias)
1699       return Result;
1700   }
1701 
1702   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1703     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1704     if (Result != AliasResult::MayAlias)
1705       return Result;
1706   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1707     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1708     if (Result != AliasResult::MayAlias)
1709       return Result;
1710   }
1711 
1712   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1713     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1714     if (Result != AliasResult::MayAlias)
1715       return Result;
1716   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1717     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1718     if (Result != AliasResult::MayAlias)
1719       return Result;
1720   }
1721 
1722   // If both pointers are pointing into the same object and one of them
1723   // accesses the entire object, then the accesses must overlap in some way.
1724   if (O1 == O2) {
1725     bool NullIsValidLocation = NullPointerIsDefined(&F);
1726     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1727         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1728          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1729       return AliasResult::PartialAlias;
1730   }
1731 
1732   return AliasResult::MayAlias;
1733 }
1734 
1735 /// Check whether two Values can be considered equivalent.
1736 ///
1737 /// In addition to pointer equivalence of \p V and \p V2 this checks whether
1738 /// they cannot be part of a cycle in the value graph by looking at all
1739 /// visited phi nodes and making sure that the phis cannot reach the value. We
1740 /// have to do this because we are looking through phi nodes (that is, we say
1741 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1742 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1743                                                   const Value *V2) {
1744   if (V != V2)
1745     return false;
1746 
1747   const Instruction *Inst = dyn_cast<Instruction>(V);
1748   if (!Inst)
1749     return true;
1750 
1751   if (VisitedPhiBBs.empty())
1752     return true;
1753 
1754   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1755     return false;
1756 
1757   // Make sure that the visited phis cannot reach the Value. This ensures that
1758   // the Values cannot come from different iterations of a potential cycle the
1759   // phi nodes could be involved in.
1760   for (auto *P : VisitedPhiBBs)
1761     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1762       return false;
1763 
1764   return true;
1765 }
1766 
1767 /// Computes the symbolic difference between two decomposed GEPs.
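///
/// For illustration (the concrete values below are made up): subtracting
///   Src  = { Offset = 16, VarIndices = [ {%i, Scale 4} ] }
/// from
///   Dest = { Offset = 40, VarIndices = [ {%i, Scale 4}, {%j, Scale 8} ] }
/// cancels the matching {%i, Scale 4} terms and leaves
///   Dest = { Offset = 24, VarIndices = [ {%j, Scale 8} ] },
/// i.e. only the components in which the two GEPs differ remain.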
1768 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP, 1769 const DecomposedGEP &SrcGEP) { 1770 DestGEP.Offset -= SrcGEP.Offset; 1771 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) { 1772 // Find V in Dest. This is N^2, but pointer indices almost never have more 1773 // than a few variable indexes. 1774 bool Found = false; 1775 for (auto I : enumerate(DestGEP.VarIndices)) { 1776 VariableGEPIndex &Dest = I.value(); 1777 if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) || 1778 !Dest.Val.hasSameCastsAs(Src.Val)) 1779 continue; 1780 1781 // If we found it, subtract off Scale V's from the entry in Dest. If it 1782 // goes to zero, remove the entry. 1783 if (Dest.Scale != Src.Scale) { 1784 Dest.Scale -= Src.Scale; 1785 Dest.IsNSW = false; 1786 } else { 1787 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index()); 1788 } 1789 Found = true; 1790 break; 1791 } 1792 1793 // If we didn't consume this entry, add it to the end of the Dest list. 1794 if (!Found) { 1795 VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW}; 1796 DestGEP.VarIndices.push_back(Entry); 1797 } 1798 } 1799 } 1800 1801 bool BasicAAResult::constantOffsetHeuristic( 1802 const DecomposedGEP &GEP, LocationSize MaybeV1Size, 1803 LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) { 1804 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() || 1805 !MaybeV2Size.hasValue()) 1806 return false; 1807 1808 const uint64_t V1Size = MaybeV1Size.getValue(); 1809 const uint64_t V2Size = MaybeV2Size.getValue(); 1810 1811 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1]; 1812 1813 if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) || 1814 Var0.Scale != -Var1.Scale || 1815 Var0.Val.V->getType() != Var1.Val.V->getType()) 1816 return false; 1817 1818 // We'll strip off the Extensions of Var0 and Var1 and do another round 1819 // of GetLinearExpression decomposition. In the example above, if Var0 1820 // is zext(%x + 1) we should get V1 == %x and V1Offset == 1. 1821 1822 LinearExpression E0 = 1823 GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT); 1824 LinearExpression E1 = 1825 GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT); 1826 if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) || 1827 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V)) 1828 return false; 1829 1830 // We have a hit - Var0 and Var1 only differ by a constant offset! 1831 1832 // If we've been sext'ed then zext'd the maximum difference between Var0 and 1833 // Var1 is possible to calculate, but we're just interested in the absolute 1834 // minimum difference between the two. The minimum distance may occur due to 1835 // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so 1836 // the minimum distance between %i and %i + 5 is 3. 1837 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff; 1838 MinDiff = APIntOps::umin(MinDiff, Wrapped); 1839 APInt MinDiffBytes = 1840 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs(); 1841 1842 // We can't definitely say whether GEP1 is before or after V2 due to wrapping 1843 // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other 1844 // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and 1845 // V2Size can fit in the MinDiffBytes gap. 
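// For illustration (the numbers are made up, continuing the i3 example above
// with Scale == 4): if E0.Offset - E1.Offset == 5, then the wrapped distance
// is 3 (i.e. -5 in 3-bit arithmetic), so MinDiff == 3 and MinDiffBytes == 12.
// NoAlias is then reported below only when each access size, plus the
// absolute constant GEP offset, fits within those 12 bytes.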
1846 return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) && 1847 MinDiffBytes.uge(V2Size + GEP.Offset.abs()); 1848 } 1849 1850 //===----------------------------------------------------------------------===// 1851 // BasicAliasAnalysis Pass 1852 //===----------------------------------------------------------------------===// 1853 1854 AnalysisKey BasicAA::Key; 1855 1856 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) { 1857 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 1858 auto &AC = AM.getResult<AssumptionAnalysis>(F); 1859 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 1860 auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F); 1861 return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV); 1862 } 1863 1864 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) { 1865 initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry()); 1866 } 1867 1868 char BasicAAWrapperPass::ID = 0; 1869 1870 void BasicAAWrapperPass::anchor() {} 1871 1872 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa", 1873 "Basic Alias Analysis (stateless AA impl)", true, true) 1874 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 1875 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1876 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 1877 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass) 1878 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa", 1879 "Basic Alias Analysis (stateless AA impl)", true, true) 1880 1881 FunctionPass *llvm::createBasicAAWrapperPass() { 1882 return new BasicAAWrapperPass(); 1883 } 1884 1885 bool BasicAAWrapperPass::runOnFunction(Function &F) { 1886 auto &ACT = getAnalysis<AssumptionCacheTracker>(); 1887 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>(); 1888 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>(); 1889 auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>(); 1890 1891 Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, 1892 TLIWP.getTLI(F), ACT.getAssumptionCache(F), 1893 &DTWP.getDomTree(), 1894 PVWP ? &PVWP->getResult() : nullptr)); 1895 1896 return false; 1897 } 1898 1899 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 1900 AU.setPreservesAll(); 1901 AU.addRequiredTransitive<AssumptionCacheTracker>(); 1902 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 1903 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 1904 AU.addUsedIfAvailable<PhiValuesWrapperPass>(); 1905 } 1906 1907 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) { 1908 return BasicAAResult( 1909 F.getParent()->getDataLayout(), F, 1910 P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 1911 P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F)); 1912 } 1913
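// A minimal usage sketch under the legacy pass manager (MyPass is a made-up
// placeholder; most passes get this analysis indirectly through
// AAResultsWrapperPass rather than requesting it directly):
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<BasicAAWrapperPass>();
//   }
//
//   bool MyPass::runOnFunction(Function &F) {
//     BasicAAResult &BAA = getAnalysis<BasicAAWrapperPass>().getResult();
//     // ... use BAA, typically by registering it with an AAResults
//     // aggregation that clients then query ...
//     return false;
//   }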