1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines the primary stateless implementation of the 10 // Alias Analysis interface that implements identities (two different 11 // globals cannot alias, etc), but does no stateful analysis. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Analysis/BasicAliasAnalysis.h" 16 #include "llvm/ADT/APInt.h" 17 #include "llvm/ADT/ScopeExit.h" 18 #include "llvm/ADT/SmallPtrSet.h" 19 #include "llvm/ADT/SmallVector.h" 20 #include "llvm/ADT/Statistic.h" 21 #include "llvm/Analysis/AliasAnalysis.h" 22 #include "llvm/Analysis/AssumptionCache.h" 23 #include "llvm/Analysis/CFG.h" 24 #include "llvm/Analysis/CaptureTracking.h" 25 #include "llvm/Analysis/InstructionSimplify.h" 26 #include "llvm/Analysis/MemoryBuiltins.h" 27 #include "llvm/Analysis/MemoryLocation.h" 28 #include "llvm/Analysis/PhiValues.h" 29 #include "llvm/Analysis/TargetLibraryInfo.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/IR/Argument.h" 32 #include "llvm/IR/Attributes.h" 33 #include "llvm/IR/Constant.h" 34 #include "llvm/IR/ConstantRange.h" 35 #include "llvm/IR/Constants.h" 36 #include "llvm/IR/DataLayout.h" 37 #include "llvm/IR/DerivedTypes.h" 38 #include "llvm/IR/Dominators.h" 39 #include "llvm/IR/Function.h" 40 #include "llvm/IR/GetElementPtrTypeIterator.h" 41 #include "llvm/IR/GlobalAlias.h" 42 #include "llvm/IR/GlobalVariable.h" 43 #include "llvm/IR/InstrTypes.h" 44 #include "llvm/IR/Instruction.h" 45 #include "llvm/IR/Instructions.h" 46 #include "llvm/IR/IntrinsicInst.h" 47 #include "llvm/IR/Intrinsics.h" 48 #include "llvm/IR/Metadata.h" 49 #include "llvm/IR/Operator.h" 50 #include "llvm/IR/Type.h" 51 #include "llvm/IR/User.h" 52 #include "llvm/IR/Value.h" 53 #include "llvm/InitializePasses.h" 54 #include "llvm/Pass.h" 55 #include "llvm/Support/Casting.h" 56 #include "llvm/Support/CommandLine.h" 57 #include "llvm/Support/Compiler.h" 58 #include "llvm/Support/KnownBits.h" 59 #include <cassert> 60 #include <cstdint> 61 #include <cstdlib> 62 #include <utility> 63 64 #define DEBUG_TYPE "basicaa" 65 66 using namespace llvm; 67 68 /// Enable analysis of recursive PHI nodes. 69 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden, 70 cl::init(true)); 71 72 /// SearchLimitReached / SearchTimes shows how often the limit of 73 /// to decompose GEPs is reached. It will affect the precision 74 /// of basic alias analysis. 75 STATISTIC(SearchLimitReached, "Number of times the limit to " 76 "decompose GEPs is reached"); 77 STATISTIC(SearchTimes, "Number of times a GEP is decomposed"); 78 79 /// Cutoff after which to stop analysing a set of phi nodes potentially involved 80 /// in a cycle. Because we are analysing 'through' phi nodes, we need to be 81 /// careful with value equivalence. We use reachability to make sure a value 82 /// cannot be involved in a cycle. 83 const unsigned MaxNumPhiBBsValueReachabilityCheck = 20; 84 85 // The max limit of the search depth in DecomposeGEPExpression() and 86 // getUnderlyingObject(). 87 static const unsigned MaxLookupSearchDepth = 6; 88 89 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA, 90 FunctionAnalysisManager::Invalidator &Inv) { 91 // We don't care if this analysis itself is preserved, it has no state. But 92 // we need to check that the analyses it depends on have been. Note that we 93 // may be created without handles to some analyses and in that case don't 94 // depend on them. 95 if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) || 96 (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) || 97 (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA))) 98 return true; 99 100 // Otherwise this analysis result remains valid. 101 return false; 102 } 103 104 //===----------------------------------------------------------------------===// 105 // Useful predicates 106 //===----------------------------------------------------------------------===// 107 108 /// Returns true if the pointer is one which would have been considered an 109 /// escape by isNonEscapingLocalObject. 110 static bool isEscapeSource(const Value *V) { 111 if (isa<CallBase>(V)) 112 return true; 113 114 // The load case works because isNonEscapingLocalObject considers all 115 // stores to be escapes (it passes true for the StoreCaptures argument 116 // to PointerMayBeCaptured). 117 if (isa<LoadInst>(V)) 118 return true; 119 120 // The inttoptr case works because isNonEscapingLocalObject considers all 121 // means of converting or equating a pointer to an int (ptrtoint, ptr store 122 // which could be followed by an integer load, ptr<->int compare) as 123 // escaping, and objects located at well-known addresses via platform-specific 124 // means cannot be considered non-escaping local objects. 125 if (isa<IntToPtrInst>(V)) 126 return true; 127 128 return false; 129 } 130 131 /// Returns the size of the object specified by V or UnknownSize if unknown. 132 static uint64_t getObjectSize(const Value *V, const DataLayout &DL, 133 const TargetLibraryInfo &TLI, 134 bool NullIsValidLoc, 135 bool RoundToAlign = false) { 136 uint64_t Size; 137 ObjectSizeOpts Opts; 138 Opts.RoundToAlign = RoundToAlign; 139 Opts.NullIsUnknownSize = NullIsValidLoc; 140 if (getObjectSize(V, Size, DL, &TLI, Opts)) 141 return Size; 142 return MemoryLocation::UnknownSize; 143 } 144 145 /// Returns true if we can prove that the object specified by V is smaller than 146 /// Size. 147 static bool isObjectSmallerThan(const Value *V, uint64_t Size, 148 const DataLayout &DL, 149 const TargetLibraryInfo &TLI, 150 bool NullIsValidLoc) { 151 // Note that the meanings of the "object" are slightly different in the 152 // following contexts: 153 // c1: llvm::getObjectSize() 154 // c2: llvm.objectsize() intrinsic 155 // c3: isObjectSmallerThan() 156 // c1 and c2 share the same meaning; however, the meaning of "object" in c3 157 // refers to the "entire object". 158 // 159 // Consider this example: 160 // char *p = (char*)malloc(100) 161 // char *q = p+80; 162 // 163 // In the context of c1 and c2, the "object" pointed by q refers to the 164 // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20. 165 // 166 // However, in the context of c3, the "object" refers to the chunk of memory 167 // being allocated. So, the "object" has 100 bytes, and q points to the middle 168 // the "object". In case q is passed to isObjectSmallerThan() as the 1st 169 // parameter, before the llvm::getObjectSize() is called to get the size of 170 // entire object, we should: 171 // - either rewind the pointer q to the base-address of the object in 172 // question (in this case rewind to p), or 173 // - just give up. It is up to caller to make sure the pointer is pointing 174 // to the base address the object. 175 // 176 // We go for 2nd option for simplicity. 177 if (!isIdentifiedObject(V)) 178 return false; 179 180 // This function needs to use the aligned object size because we allow 181 // reads a bit past the end given sufficient alignment. 182 uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc, 183 /*RoundToAlign*/ true); 184 185 return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size; 186 } 187 188 /// Return the minimal extent from \p V to the end of the underlying object, 189 /// assuming the result is used in an aliasing query. E.g., we do use the query 190 /// location size and the fact that null pointers cannot alias here. 191 static uint64_t getMinimalExtentFrom(const Value &V, 192 const LocationSize &LocSize, 193 const DataLayout &DL, 194 bool NullIsValidLoc) { 195 // If we have dereferenceability information we know a lower bound for the 196 // extent as accesses for a lower offset would be valid. We need to exclude 197 // the "or null" part if null is a valid pointer. We can ignore frees, as an 198 // access after free would be undefined behavior. 199 bool CanBeNull, CanBeFreed; 200 uint64_t DerefBytes = 201 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed); 202 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes; 203 // If queried with a precise location size, we assume that location size to be 204 // accessed, thus valid. 205 if (LocSize.isPrecise()) 206 DerefBytes = std::max(DerefBytes, LocSize.getValue()); 207 return DerefBytes; 208 } 209 210 /// Returns true if we can prove that the object specified by V has size Size. 211 static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL, 212 const TargetLibraryInfo &TLI, bool NullIsValidLoc) { 213 uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc); 214 return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size; 215 } 216 217 //===----------------------------------------------------------------------===// 218 // CaptureInfo implementations 219 //===----------------------------------------------------------------------===// 220 221 CaptureInfo::~CaptureInfo() = default; 222 223 bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object, 224 const Instruction *I) { 225 return isNonEscapingLocalObject(Object, &IsCapturedCache); 226 } 227 228 bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object, 229 const Instruction *I) { 230 if (!isIdentifiedFunctionLocal(Object)) 231 return false; 232 233 auto Iter = EarliestEscapes.insert({Object, nullptr}); 234 if (Iter.second) { 235 Instruction *EarliestCapture = FindEarliestCapture( 236 Object, *const_cast<Function *>(I->getFunction()), 237 /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT); 238 if (EarliestCapture) { 239 auto Ins = Inst2Obj.insert({EarliestCapture, {}}); 240 Ins.first->second.push_back(Object); 241 } 242 Iter.first->second = EarliestCapture; 243 } 244 245 // No capturing instruction. 246 if (!Iter.first->second) 247 return true; 248 249 return I != Iter.first->second && 250 !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI); 251 } 252 253 void EarliestEscapeInfo::removeInstruction(Instruction *I) { 254 auto Iter = Inst2Obj.find(I); 255 if (Iter != Inst2Obj.end()) { 256 for (const Value *Obj : Iter->second) 257 EarliestEscapes.erase(Obj); 258 Inst2Obj.erase(I); 259 } 260 } 261 262 //===----------------------------------------------------------------------===// 263 // GetElementPtr Instruction Decomposition and Analysis 264 //===----------------------------------------------------------------------===// 265 266 namespace { 267 /// Represents zext(sext(V)). 268 struct ExtendedValue { 269 const Value *V; 270 unsigned ZExtBits; 271 unsigned SExtBits; 272 273 explicit ExtendedValue(const Value *V, unsigned ZExtBits = 0, 274 unsigned SExtBits = 0) 275 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits) {} 276 277 unsigned getBitWidth() const { 278 return V->getType()->getPrimitiveSizeInBits() + ZExtBits + SExtBits; 279 } 280 281 ExtendedValue withValue(const Value *NewV) const { 282 return ExtendedValue(NewV, ZExtBits, SExtBits); 283 } 284 285 ExtendedValue withZExtOfValue(const Value *NewV) const { 286 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() - 287 NewV->getType()->getPrimitiveSizeInBits(); 288 // zext(sext(zext(NewV))) == zext(zext(zext(NewV))) 289 return ExtendedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0); 290 } 291 292 ExtendedValue withSExtOfValue(const Value *NewV) const { 293 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() - 294 NewV->getType()->getPrimitiveSizeInBits(); 295 // zext(sext(sext(NewV))) 296 return ExtendedValue(NewV, ZExtBits, SExtBits + ExtendBy); 297 } 298 299 APInt evaluateWith(APInt N) const { 300 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() && 301 "Incompatible bit width"); 302 if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits); 303 if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits); 304 return N; 305 } 306 307 KnownBits evaluateWith(KnownBits N) const { 308 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() && 309 "Incompatible bit width"); 310 if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits); 311 if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits); 312 return N; 313 } 314 315 ConstantRange evaluateWith(ConstantRange N) const { 316 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() && 317 "Incompatible bit width"); 318 if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits); 319 if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits); 320 return N; 321 } 322 323 bool canDistributeOver(bool NUW, bool NSW) const { 324 // zext(x op<nuw> y) == zext(x) op<nuw> zext(y) 325 // sext(x op<nsw> y) == sext(x) op<nsw> sext(y) 326 return (!ZExtBits || NUW) && (!SExtBits || NSW); 327 } 328 329 bool hasSameExtensionsAs(const ExtendedValue &Other) const { 330 return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits; 331 } 332 }; 333 334 /// Represents zext(sext(V)) * Scale + Offset. 335 struct LinearExpression { 336 ExtendedValue Val; 337 APInt Scale; 338 APInt Offset; 339 340 /// True if all operations in this expression are NSW. 341 bool IsNSW; 342 343 LinearExpression(const ExtendedValue &Val, const APInt &Scale, 344 const APInt &Offset, bool IsNSW) 345 : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {} 346 347 LinearExpression(const ExtendedValue &Val) : Val(Val), IsNSW(true) { 348 unsigned BitWidth = Val.getBitWidth(); 349 Scale = APInt(BitWidth, 1); 350 Offset = APInt(BitWidth, 0); 351 } 352 }; 353 } 354 355 /// Analyzes the specified value as a linear expression: "A*V + B", where A and 356 /// B are constant integers. 357 static LinearExpression GetLinearExpression( 358 const ExtendedValue &Val, const DataLayout &DL, unsigned Depth, 359 AssumptionCache *AC, DominatorTree *DT) { 360 // Limit our recursion depth. 361 if (Depth == 6) 362 return Val; 363 364 if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V)) 365 return LinearExpression(Val, APInt(Val.getBitWidth(), 0), 366 Val.evaluateWith(Const->getValue()), true); 367 368 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) { 369 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) { 370 APInt RHS = Val.evaluateWith(RHSC->getValue()); 371 // The only non-OBO case we deal with is or, and only limited to the 372 // case where it is both nuw and nsw. 373 bool NUW = true, NSW = true; 374 if (isa<OverflowingBinaryOperator>(BOp)) { 375 NUW &= BOp->hasNoUnsignedWrap(); 376 NSW &= BOp->hasNoSignedWrap(); 377 } 378 if (!Val.canDistributeOver(NUW, NSW)) 379 return Val; 380 381 LinearExpression E(Val); 382 switch (BOp->getOpcode()) { 383 default: 384 // We don't understand this instruction, so we can't decompose it any 385 // further. 386 return Val; 387 case Instruction::Or: 388 // X|C == X+C if all the bits in C are unset in X. Otherwise we can't 389 // analyze it. 390 if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC, 391 BOp, DT)) 392 return Val; 393 394 LLVM_FALLTHROUGH; 395 case Instruction::Add: { 396 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, 397 Depth + 1, AC, DT); 398 E.Offset += RHS; 399 E.IsNSW &= NSW; 400 break; 401 } 402 case Instruction::Sub: { 403 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, 404 Depth + 1, AC, DT); 405 E.Offset -= RHS; 406 E.IsNSW &= NSW; 407 break; 408 } 409 case Instruction::Mul: { 410 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, 411 Depth + 1, AC, DT); 412 E.Offset *= RHS; 413 E.Scale *= RHS; 414 E.IsNSW &= NSW; 415 break; 416 } 417 case Instruction::Shl: 418 // We're trying to linearize an expression of the kind: 419 // shl i8 -128, 36 420 // where the shift count exceeds the bitwidth of the type. 421 // We can't decompose this further (the expression would return 422 // a poison value). 423 if (RHS.getLimitedValue() > Val.getBitWidth()) 424 return Val; 425 426 E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL, 427 Depth + 1, AC, DT); 428 E.Offset <<= RHS.getLimitedValue(); 429 E.Scale <<= RHS.getLimitedValue(); 430 E.IsNSW &= NSW; 431 break; 432 } 433 return E; 434 } 435 } 436 437 if (isa<ZExtInst>(Val.V)) 438 return GetLinearExpression( 439 Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)), 440 DL, Depth + 1, AC, DT); 441 442 if (isa<SExtInst>(Val.V)) 443 return GetLinearExpression( 444 Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)), 445 DL, Depth + 1, AC, DT); 446 447 return Val; 448 } 449 450 /// To ensure a pointer offset fits in an integer of size PointerSize 451 /// (in bits) when that size is smaller than the maximum pointer size. This is 452 /// an issue, for example, in particular for 32b pointers with negative indices 453 /// that rely on two's complement wrap-arounds for precise alias information 454 /// where the maximum pointer size is 64b. 455 static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) { 456 assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!"); 457 unsigned ShiftBits = Offset.getBitWidth() - PointerSize; 458 return (Offset << ShiftBits).ashr(ShiftBits); 459 } 460 461 namespace { 462 // A linear transformation of a Value; this class represents 463 // ZExt(SExt(V, SExtBits), ZExtBits) * Scale. 464 struct VariableGEPIndex { 465 ExtendedValue Val; 466 APInt Scale; 467 468 // Context instruction to use when querying information about this index. 469 const Instruction *CxtI; 470 471 /// True if all operations in this expression are NSW. 472 bool IsNSW; 473 474 void dump() const { 475 print(dbgs()); 476 dbgs() << "\n"; 477 } 478 void print(raw_ostream &OS) const { 479 OS << "(V=" << Val.V->getName() 480 << ", zextbits=" << Val.ZExtBits 481 << ", sextbits=" << Val.SExtBits 482 << ", scale=" << Scale << ")"; 483 } 484 }; 485 } 486 487 // Represents the internal structure of a GEP, decomposed into a base pointer, 488 // constant offsets, and variable scaled indices. 489 struct BasicAAResult::DecomposedGEP { 490 // Base pointer of the GEP 491 const Value *Base; 492 // Total constant offset from base. 493 APInt Offset; 494 // Scaled variable (non-constant) indices. 495 SmallVector<VariableGEPIndex, 4> VarIndices; 496 // Are all operations inbounds GEPs or non-indexing operations? 497 // (None iff expression doesn't involve any geps) 498 Optional<bool> InBounds; 499 500 void dump() const { 501 print(dbgs()); 502 dbgs() << "\n"; 503 } 504 void print(raw_ostream &OS) const { 505 OS << "(DecomposedGEP Base=" << Base->getName() 506 << ", Offset=" << Offset 507 << ", VarIndices=["; 508 for (size_t i = 0; i < VarIndices.size(); i++) { 509 if (i != 0) 510 OS << ", "; 511 VarIndices[i].print(OS); 512 } 513 OS << "])"; 514 } 515 }; 516 517 518 /// If V is a symbolic pointer expression, decompose it into a base pointer 519 /// with a constant offset and a number of scaled symbolic offsets. 520 /// 521 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale 522 /// in the VarIndices vector) are Value*'s that are known to be scaled by the 523 /// specified amount, but which may have other unrepresented high bits. As 524 /// such, the gep cannot necessarily be reconstructed from its decomposed form. 525 BasicAAResult::DecomposedGEP 526 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL, 527 AssumptionCache *AC, DominatorTree *DT) { 528 // Limit recursion depth to limit compile time in crazy cases. 529 unsigned MaxLookup = MaxLookupSearchDepth; 530 SearchTimes++; 531 const Instruction *CxtI = dyn_cast<Instruction>(V); 532 533 unsigned MaxPointerSize = DL.getMaxPointerSizeInBits(); 534 DecomposedGEP Decomposed; 535 Decomposed.Offset = APInt(MaxPointerSize, 0); 536 do { 537 // See if this is a bitcast or GEP. 538 const Operator *Op = dyn_cast<Operator>(V); 539 if (!Op) { 540 // The only non-operator case we can handle are GlobalAliases. 541 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 542 if (!GA->isInterposable()) { 543 V = GA->getAliasee(); 544 continue; 545 } 546 } 547 Decomposed.Base = V; 548 return Decomposed; 549 } 550 551 if (Op->getOpcode() == Instruction::BitCast || 552 Op->getOpcode() == Instruction::AddrSpaceCast) { 553 V = Op->getOperand(0); 554 continue; 555 } 556 557 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op); 558 if (!GEPOp) { 559 if (const auto *PHI = dyn_cast<PHINode>(V)) { 560 // Look through single-arg phi nodes created by LCSSA. 561 if (PHI->getNumIncomingValues() == 1) { 562 V = PHI->getIncomingValue(0); 563 continue; 564 } 565 } else if (const auto *Call = dyn_cast<CallBase>(V)) { 566 // CaptureTracking can know about special capturing properties of some 567 // intrinsics like launder.invariant.group, that can't be expressed with 568 // the attributes, but have properties like returning aliasing pointer. 569 // Because some analysis may assume that nocaptured pointer is not 570 // returned from some special intrinsic (because function would have to 571 // be marked with returns attribute), it is crucial to use this function 572 // because it should be in sync with CaptureTracking. Not using it may 573 // cause weird miscompilations where 2 aliasing pointers are assumed to 574 // noalias. 575 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) { 576 V = RP; 577 continue; 578 } 579 } 580 581 Decomposed.Base = V; 582 return Decomposed; 583 } 584 585 // Track whether we've seen at least one in bounds gep, and if so, whether 586 // all geps parsed were in bounds. 587 if (Decomposed.InBounds == None) 588 Decomposed.InBounds = GEPOp->isInBounds(); 589 else if (!GEPOp->isInBounds()) 590 Decomposed.InBounds = false; 591 592 assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized"); 593 594 // Don't attempt to analyze GEPs if index scale is not a compile-time 595 // constant. 596 if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) { 597 Decomposed.Base = V; 598 return Decomposed; 599 } 600 601 unsigned AS = GEPOp->getPointerAddressSpace(); 602 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices. 603 gep_type_iterator GTI = gep_type_begin(GEPOp); 604 unsigned PointerSize = DL.getPointerSizeInBits(AS); 605 // Assume all GEP operands are constants until proven otherwise. 606 bool GepHasConstantOffset = true; 607 for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end(); 608 I != E; ++I, ++GTI) { 609 const Value *Index = *I; 610 // Compute the (potentially symbolic) offset in bytes for this index. 611 if (StructType *STy = GTI.getStructTypeOrNull()) { 612 // For a struct, add the member offset. 613 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue(); 614 if (FieldNo == 0) 615 continue; 616 617 Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo); 618 continue; 619 } 620 621 // For an array/pointer, add the element offset, explicitly scaled. 622 if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) { 623 if (CIdx->isZero()) 624 continue; 625 Decomposed.Offset += 626 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() * 627 CIdx->getValue().sextOrTrunc(MaxPointerSize); 628 continue; 629 } 630 631 GepHasConstantOffset = false; 632 633 APInt Scale(MaxPointerSize, 634 DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize()); 635 // If the integer type is smaller than the pointer size, it is implicitly 636 // sign extended to pointer size. 637 unsigned Width = Index->getType()->getIntegerBitWidth(); 638 unsigned SExtBits = PointerSize > Width ? PointerSize - Width : 0; 639 LinearExpression LE = GetLinearExpression( 640 ExtendedValue(Index, 0, SExtBits), DL, 0, AC, DT); 641 642 // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale. 643 // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale. 644 645 // It can be the case that, even through C1*V+C2 does not overflow for 646 // relevant values of V, (C2*Scale) can overflow. In that case, we cannot 647 // decompose the expression in this way. 648 // 649 // FIXME: C1*Scale and the other operations in the decomposed 650 // (C1*Scale)*V+C2*Scale can also overflow. We should check for this 651 // possibility. 652 bool Overflow; 653 APInt ScaledOffset = LE.Offset.sextOrTrunc(MaxPointerSize) 654 .smul_ov(Scale, Overflow); 655 if (Overflow) { 656 LE = LinearExpression(ExtendedValue(Index, 0, SExtBits)); 657 } else { 658 Decomposed.Offset += ScaledOffset; 659 Scale *= LE.Scale.sextOrTrunc(MaxPointerSize); 660 } 661 662 // If we already had an occurrence of this index variable, merge this 663 // scale into it. For example, we want to handle: 664 // A[x][x] -> x*16 + x*4 -> x*20 665 // This also ensures that 'x' only appears in the index list once. 666 for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) { 667 if (Decomposed.VarIndices[i].Val.V == LE.Val.V && 668 Decomposed.VarIndices[i].Val.hasSameExtensionsAs(LE.Val)) { 669 Scale += Decomposed.VarIndices[i].Scale; 670 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i); 671 break; 672 } 673 } 674 675 // Make sure that we have a scale that makes sense for this target's 676 // pointer size. 677 Scale = adjustToPointerSize(Scale, PointerSize); 678 679 if (!!Scale) { 680 VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW}; 681 Decomposed.VarIndices.push_back(Entry); 682 } 683 } 684 685 // Take care of wrap-arounds 686 if (GepHasConstantOffset) 687 Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize); 688 689 // Analyze the base pointer next. 690 V = GEPOp->getOperand(0); 691 } while (--MaxLookup); 692 693 // If the chain of expressions is too deep, just return early. 694 Decomposed.Base = V; 695 SearchLimitReached++; 696 return Decomposed; 697 } 698 699 /// Returns whether the given pointer value points to memory that is local to 700 /// the function, with global constants being considered local to all 701 /// functions. 702 bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc, 703 AAQueryInfo &AAQI, bool OrLocal) { 704 assert(Visited.empty() && "Visited must be cleared after use!"); 705 706 unsigned MaxLookup = 8; 707 SmallVector<const Value *, 16> Worklist; 708 Worklist.push_back(Loc.Ptr); 709 do { 710 const Value *V = getUnderlyingObject(Worklist.pop_back_val()); 711 if (!Visited.insert(V).second) { 712 Visited.clear(); 713 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); 714 } 715 716 // An alloca instruction defines local memory. 717 if (OrLocal && isa<AllocaInst>(V)) 718 continue; 719 720 // A global constant counts as local memory for our purposes. 721 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) { 722 // Note: this doesn't require GV to be "ODR" because it isn't legal for a 723 // global to be marked constant in some modules and non-constant in 724 // others. GV may even be a declaration, not a definition. 725 if (!GV->isConstant()) { 726 Visited.clear(); 727 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); 728 } 729 continue; 730 } 731 732 // If both select values point to local memory, then so does the select. 733 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 734 Worklist.push_back(SI->getTrueValue()); 735 Worklist.push_back(SI->getFalseValue()); 736 continue; 737 } 738 739 // If all values incoming to a phi node point to local memory, then so does 740 // the phi. 741 if (const PHINode *PN = dyn_cast<PHINode>(V)) { 742 // Don't bother inspecting phi nodes with many operands. 743 if (PN->getNumIncomingValues() > MaxLookup) { 744 Visited.clear(); 745 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); 746 } 747 append_range(Worklist, PN->incoming_values()); 748 continue; 749 } 750 751 // Otherwise be conservative. 752 Visited.clear(); 753 return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal); 754 } while (!Worklist.empty() && --MaxLookup); 755 756 Visited.clear(); 757 return Worklist.empty(); 758 } 759 760 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) { 761 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call); 762 return II && II->getIntrinsicID() == IID; 763 } 764 765 /// Returns the behavior when calling the given call site. 766 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) { 767 if (Call->doesNotAccessMemory()) 768 // Can't do better than this. 769 return FMRB_DoesNotAccessMemory; 770 771 FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior; 772 773 // If the callsite knows it only reads memory, don't return worse 774 // than that. 775 if (Call->onlyReadsMemory()) 776 Min = FMRB_OnlyReadsMemory; 777 else if (Call->doesNotReadMemory()) 778 Min = FMRB_OnlyWritesMemory; 779 780 if (Call->onlyAccessesArgMemory()) 781 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees); 782 else if (Call->onlyAccessesInaccessibleMemory()) 783 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem); 784 else if (Call->onlyAccessesInaccessibleMemOrArgMem()) 785 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem); 786 787 // If the call has operand bundles then aliasing attributes from the function 788 // it calls do not directly apply to the call. This can be made more precise 789 // in the future. 790 if (!Call->hasOperandBundles()) 791 if (const Function *F = Call->getCalledFunction()) 792 Min = 793 FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F)); 794 795 return Min; 796 } 797 798 /// Returns the behavior when calling the given function. For use when the call 799 /// site is not known. 800 FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) { 801 // If the function declares it doesn't access memory, we can't do better. 802 if (F->doesNotAccessMemory()) 803 return FMRB_DoesNotAccessMemory; 804 805 FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior; 806 807 // If the function declares it only reads memory, go with that. 808 if (F->onlyReadsMemory()) 809 Min = FMRB_OnlyReadsMemory; 810 else if (F->doesNotReadMemory()) 811 Min = FMRB_OnlyWritesMemory; 812 813 if (F->onlyAccessesArgMemory()) 814 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees); 815 else if (F->onlyAccessesInaccessibleMemory()) 816 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem); 817 else if (F->onlyAccessesInaccessibleMemOrArgMem()) 818 Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem); 819 820 return Min; 821 } 822 823 /// Returns true if this is a writeonly (i.e Mod only) parameter. 824 static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx, 825 const TargetLibraryInfo &TLI) { 826 if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly)) 827 return true; 828 829 // We can bound the aliasing properties of memset_pattern16 just as we can 830 // for memcpy/memset. This is particularly important because the 831 // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16 832 // whenever possible. 833 // FIXME Consider handling this in InferFunctionAttr.cpp together with other 834 // attributes. 835 LibFunc F; 836 if (Call->getCalledFunction() && 837 TLI.getLibFunc(*Call->getCalledFunction(), F) && 838 F == LibFunc_memset_pattern16 && TLI.has(F)) 839 if (ArgIdx == 0) 840 return true; 841 842 // TODO: memset_pattern4, memset_pattern8 843 // TODO: _chk variants 844 // TODO: strcmp, strcpy 845 846 return false; 847 } 848 849 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call, 850 unsigned ArgIdx) { 851 // Checking for known builtin intrinsics and target library functions. 852 if (isWriteOnlyParam(Call, ArgIdx, TLI)) 853 return ModRefInfo::Mod; 854 855 if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly)) 856 return ModRefInfo::Ref; 857 858 if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone)) 859 return ModRefInfo::NoModRef; 860 861 return AAResultBase::getArgModRefInfo(Call, ArgIdx); 862 } 863 864 #ifndef NDEBUG 865 static const Function *getParent(const Value *V) { 866 if (const Instruction *inst = dyn_cast<Instruction>(V)) { 867 if (!inst->getParent()) 868 return nullptr; 869 return inst->getParent()->getParent(); 870 } 871 872 if (const Argument *arg = dyn_cast<Argument>(V)) 873 return arg->getParent(); 874 875 return nullptr; 876 } 877 878 static bool notDifferentParent(const Value *O1, const Value *O2) { 879 880 const Function *F1 = getParent(O1); 881 const Function *F2 = getParent(O2); 882 883 return !F1 || !F2 || F1 == F2; 884 } 885 #endif 886 887 AliasResult BasicAAResult::alias(const MemoryLocation &LocA, 888 const MemoryLocation &LocB, 889 AAQueryInfo &AAQI) { 890 assert(notDifferentParent(LocA.Ptr, LocB.Ptr) && 891 "BasicAliasAnalysis doesn't support interprocedural queries."); 892 return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI); 893 } 894 895 /// Checks to see if the specified callsite can clobber the specified memory 896 /// object. 897 /// 898 /// Since we only look at local properties of this function, we really can't 899 /// say much about this query. We do, however, use simple "address taken" 900 /// analysis on local objects. 901 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call, 902 const MemoryLocation &Loc, 903 AAQueryInfo &AAQI) { 904 assert(notDifferentParent(Call, Loc.Ptr) && 905 "AliasAnalysis query involving multiple functions!"); 906 907 const Value *Object = getUnderlyingObject(Loc.Ptr); 908 909 // Calls marked 'tail' cannot read or write allocas from the current frame 910 // because the current frame might be destroyed by the time they run. However, 911 // a tail call may use an alloca with byval. Calling with byval copies the 912 // contents of the alloca into argument registers or stack slots, so there is 913 // no lifetime issue. 914 if (isa<AllocaInst>(Object)) 915 if (const CallInst *CI = dyn_cast<CallInst>(Call)) 916 if (CI->isTailCall() && 917 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal)) 918 return ModRefInfo::NoModRef; 919 920 // Stack restore is able to modify unescaped dynamic allocas. Assume it may 921 // modify them even though the alloca is not escaped. 922 if (auto *AI = dyn_cast<AllocaInst>(Object)) 923 if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore)) 924 return ModRefInfo::Mod; 925 926 // If the pointer is to a locally allocated object that does not escape, 927 // then the call can not mod/ref the pointer unless the call takes the pointer 928 // as an argument, and itself doesn't capture it. 929 if (!isa<Constant>(Object) && Call != Object && 930 AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) { 931 932 // Optimistically assume that call doesn't touch Object and check this 933 // assumption in the following loop. 934 ModRefInfo Result = ModRefInfo::NoModRef; 935 bool IsMustAlias = true; 936 937 unsigned OperandNo = 0; 938 for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end(); 939 CI != CE; ++CI, ++OperandNo) { 940 // Only look at the no-capture or byval pointer arguments. If this 941 // pointer were passed to arguments that were neither of these, then it 942 // couldn't be no-capture. 943 if (!(*CI)->getType()->isPointerTy() || 944 (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() && 945 !Call->isByValArgument(OperandNo))) 946 continue; 947 948 // Call doesn't access memory through this operand, so we don't care 949 // if it aliases with Object. 950 if (Call->doesNotAccessMemory(OperandNo)) 951 continue; 952 953 // If this is a no-capture pointer argument, see if we can tell that it 954 // is impossible to alias the pointer we're checking. 955 AliasResult AR = getBestAAResults().alias( 956 MemoryLocation::getBeforeOrAfter(*CI), 957 MemoryLocation::getBeforeOrAfter(Object), AAQI); 958 if (AR != AliasResult::MustAlias) 959 IsMustAlias = false; 960 // Operand doesn't alias 'Object', continue looking for other aliases 961 if (AR == AliasResult::NoAlias) 962 continue; 963 // Operand aliases 'Object', but call doesn't modify it. Strengthen 964 // initial assumption and keep looking in case if there are more aliases. 965 if (Call->onlyReadsMemory(OperandNo)) { 966 Result = setRef(Result); 967 continue; 968 } 969 // Operand aliases 'Object' but call only writes into it. 970 if (Call->doesNotReadMemory(OperandNo)) { 971 Result = setMod(Result); 972 continue; 973 } 974 // This operand aliases 'Object' and call reads and writes into it. 975 // Setting ModRef will not yield an early return below, MustAlias is not 976 // used further. 977 Result = ModRefInfo::ModRef; 978 break; 979 } 980 981 // No operand aliases, reset Must bit. Add below if at least one aliases 982 // and all aliases found are MustAlias. 983 if (isNoModRef(Result)) 984 IsMustAlias = false; 985 986 // Early return if we improved mod ref information 987 if (!isModAndRefSet(Result)) { 988 if (isNoModRef(Result)) 989 return ModRefInfo::NoModRef; 990 return IsMustAlias ? setMust(Result) : clearMust(Result); 991 } 992 } 993 994 // If the call is malloc/calloc like, we can assume that it doesn't 995 // modify any IR visible value. This is only valid because we assume these 996 // routines do not read values visible in the IR. TODO: Consider special 997 // casing realloc and strdup routines which access only their arguments as 998 // well. Or alternatively, replace all of this with inaccessiblememonly once 999 // that's implemented fully. 1000 if (isMallocOrCallocLikeFn(Call, &TLI)) { 1001 // Be conservative if the accessed pointer may alias the allocation - 1002 // fallback to the generic handling below. 1003 if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call), Loc, 1004 AAQI) == AliasResult::NoAlias) 1005 return ModRefInfo::NoModRef; 1006 } 1007 1008 // The semantics of memcpy intrinsics either exactly overlap or do not 1009 // overlap, i.e., source and destination of any given memcpy are either 1010 // no-alias or must-alias. 1011 if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) { 1012 AliasResult SrcAA = 1013 getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI); 1014 AliasResult DestAA = 1015 getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI); 1016 // It's also possible for Loc to alias both src and dest, or neither. 1017 ModRefInfo rv = ModRefInfo::NoModRef; 1018 if (SrcAA != AliasResult::NoAlias) 1019 rv = setRef(rv); 1020 if (DestAA != AliasResult::NoAlias) 1021 rv = setMod(rv); 1022 return rv; 1023 } 1024 1025 // Guard intrinsics are marked as arbitrarily writing so that proper control 1026 // dependencies are maintained but they never mods any particular memory 1027 // location. 1028 // 1029 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the 1030 // heap state at the point the guard is issued needs to be consistent in case 1031 // the guard invokes the "deopt" continuation. 1032 if (isIntrinsicCall(Call, Intrinsic::experimental_guard)) 1033 return ModRefInfo::Ref; 1034 // The same applies to deoptimize which is essentially a guard(false). 1035 if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize)) 1036 return ModRefInfo::Ref; 1037 1038 // Like assumes, invariant.start intrinsics were also marked as arbitrarily 1039 // writing so that proper control dependencies are maintained but they never 1040 // mod any particular memory location visible to the IR. 1041 // *Unlike* assumes (which are now modeled as NoModRef), invariant.start 1042 // intrinsic is now modeled as reading memory. This prevents hoisting the 1043 // invariant.start intrinsic over stores. Consider: 1044 // *ptr = 40; 1045 // *ptr = 50; 1046 // invariant_start(ptr) 1047 // int val = *ptr; 1048 // print(val); 1049 // 1050 // This cannot be transformed to: 1051 // 1052 // *ptr = 40; 1053 // invariant_start(ptr) 1054 // *ptr = 50; 1055 // int val = *ptr; 1056 // print(val); 1057 // 1058 // The transformation will cause the second store to be ignored (based on 1059 // rules of invariant.start) and print 40, while the first program always 1060 // prints 50. 1061 if (isIntrinsicCall(Call, Intrinsic::invariant_start)) 1062 return ModRefInfo::Ref; 1063 1064 // The AAResultBase base class has some smarts, lets use them. 1065 return AAResultBase::getModRefInfo(Call, Loc, AAQI); 1066 } 1067 1068 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1, 1069 const CallBase *Call2, 1070 AAQueryInfo &AAQI) { 1071 // Guard intrinsics are marked as arbitrarily writing so that proper control 1072 // dependencies are maintained but they never mods any particular memory 1073 // location. 1074 // 1075 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the 1076 // heap state at the point the guard is issued needs to be consistent in case 1077 // the guard invokes the "deopt" continuation. 1078 1079 // NB! This function is *not* commutative, so we special case two 1080 // possibilities for guard intrinsics. 1081 1082 if (isIntrinsicCall(Call1, Intrinsic::experimental_guard)) 1083 return isModSet(createModRefInfo(getModRefBehavior(Call2))) 1084 ? ModRefInfo::Ref 1085 : ModRefInfo::NoModRef; 1086 1087 if (isIntrinsicCall(Call2, Intrinsic::experimental_guard)) 1088 return isModSet(createModRefInfo(getModRefBehavior(Call1))) 1089 ? ModRefInfo::Mod 1090 : ModRefInfo::NoModRef; 1091 1092 // The AAResultBase base class has some smarts, lets use them. 1093 return AAResultBase::getModRefInfo(Call1, Call2, AAQI); 1094 } 1095 1096 /// Return true if we know V to the base address of the corresponding memory 1097 /// object. This implies that any address less than V must be out of bounds 1098 /// for the underlying object. Note that just being isIdentifiedObject() is 1099 /// not enough - For example, a negative offset from a noalias argument or call 1100 /// can be inbounds w.r.t the actual underlying object. 1101 static bool isBaseOfObject(const Value *V) { 1102 // TODO: We can handle other cases here 1103 // 1) For GC languages, arguments to functions are often required to be 1104 // base pointers. 1105 // 2) Result of allocation routines are often base pointers. Leverage TLI. 1106 return (isa<AllocaInst>(V) || isa<GlobalVariable>(V)); 1107 } 1108 1109 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against 1110 /// another pointer. 1111 /// 1112 /// We know that V1 is a GEP, but we don't know anything about V2. 1113 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for 1114 /// V2. 1115 AliasResult BasicAAResult::aliasGEP( 1116 const GEPOperator *GEP1, LocationSize V1Size, 1117 const Value *V2, LocationSize V2Size, 1118 const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) { 1119 if (!V1Size.hasValue() && !V2Size.hasValue()) { 1120 // TODO: This limitation exists for compile-time reasons. Relax it if we 1121 // can avoid exponential pathological cases. 1122 if (!isa<GEPOperator>(V2)) 1123 return AliasResult::MayAlias; 1124 1125 // If both accesses have unknown size, we can only check whether the base 1126 // objects don't alias. 1127 AliasResult BaseAlias = getBestAAResults().alias( 1128 MemoryLocation::getBeforeOrAfter(UnderlyingV1), 1129 MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI); 1130 return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias 1131 : AliasResult::MayAlias; 1132 } 1133 1134 DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT); 1135 DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT); 1136 1137 // Bail if we were not able to decompose anything. 1138 if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2) 1139 return AliasResult::MayAlias; 1140 1141 // Subtract the GEP2 pointer from the GEP1 pointer to find out their 1142 // symbolic difference. 1143 subtractDecomposedGEPs(DecompGEP1, DecompGEP2); 1144 1145 // If an inbounds GEP would have to start from an out of bounds address 1146 // for the two to alias, then we can assume noalias. 1147 if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() && 1148 V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) && 1149 isBaseOfObject(DecompGEP2.Base)) 1150 return AliasResult::NoAlias; 1151 1152 if (isa<GEPOperator>(V2)) { 1153 // Symmetric case to above. 1154 if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() && 1155 V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) && 1156 isBaseOfObject(DecompGEP1.Base)) 1157 return AliasResult::NoAlias; 1158 } 1159 1160 // For GEPs with identical offsets, we can preserve the size and AAInfo 1161 // when performing the alias check on the underlying objects. 1162 if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty()) 1163 return getBestAAResults().alias(MemoryLocation(DecompGEP1.Base, V1Size), 1164 MemoryLocation(DecompGEP2.Base, V2Size), 1165 AAQI); 1166 1167 // Do the base pointers alias? 1168 AliasResult BaseAlias = getBestAAResults().alias( 1169 MemoryLocation::getBeforeOrAfter(DecompGEP1.Base), 1170 MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI); 1171 1172 // If we get a No or May, then return it immediately, no amount of analysis 1173 // will improve this situation. 1174 if (BaseAlias != AliasResult::MustAlias) { 1175 assert(BaseAlias == AliasResult::NoAlias || 1176 BaseAlias == AliasResult::MayAlias); 1177 return BaseAlias; 1178 } 1179 1180 // If there is a constant difference between the pointers, but the difference 1181 // is less than the size of the associated memory object, then we know 1182 // that the objects are partially overlapping. If the difference is 1183 // greater, we know they do not overlap. 1184 if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) { 1185 APInt &Off = DecompGEP1.Offset; 1186 1187 // Initialize for Off >= 0 (V2 <= GEP1) case. 1188 const Value *LeftPtr = V2; 1189 const Value *RightPtr = GEP1; 1190 LocationSize VLeftSize = V2Size; 1191 LocationSize VRightSize = V1Size; 1192 const bool Swapped = Off.isNegative(); 1193 1194 if (Swapped) { 1195 // Swap if we have the situation where: 1196 // + + 1197 // | BaseOffset | 1198 // ---------------->| 1199 // |-->V1Size |-------> V2Size 1200 // GEP1 V2 1201 std::swap(LeftPtr, RightPtr); 1202 std::swap(VLeftSize, VRightSize); 1203 Off = -Off; 1204 } 1205 1206 if (VLeftSize.hasValue()) { 1207 const uint64_t LSize = VLeftSize.getValue(); 1208 if (Off.ult(LSize)) { 1209 // Conservatively drop processing if a phi was visited and/or offset is 1210 // too big. 1211 AliasResult AR = AliasResult::PartialAlias; 1212 if (VRightSize.hasValue() && Off.ule(INT32_MAX) && 1213 (Off + VRightSize.getValue()).ule(LSize)) { 1214 // Memory referenced by right pointer is nested. Save the offset in 1215 // cache. Note that originally offset estimated as GEP1-V2, but 1216 // AliasResult contains the shift that represents GEP1+Offset=V2. 1217 AR.setOffset(-Off.getSExtValue()); 1218 AR.swap(Swapped); 1219 } 1220 return AR; 1221 } 1222 return AliasResult::NoAlias; 1223 } 1224 } 1225 1226 if (!DecompGEP1.VarIndices.empty()) { 1227 APInt GCD; 1228 bool AllNonNegative = DecompGEP1.Offset.isNonNegative(); 1229 bool AllNonPositive = DecompGEP1.Offset.isNonPositive(); 1230 for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) { 1231 const VariableGEPIndex &Index = DecompGEP1.VarIndices[i]; 1232 const APInt &Scale = Index.Scale; 1233 APInt ScaleForGCD = Scale; 1234 if (!Index.IsNSW) 1235 ScaleForGCD = APInt::getOneBitSet(Scale.getBitWidth(), 1236 Scale.countTrailingZeros()); 1237 1238 if (i == 0) 1239 GCD = ScaleForGCD.abs(); 1240 else 1241 GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs()); 1242 1243 if (AllNonNegative || AllNonPositive) { 1244 KnownBits Known = Index.Val.evaluateWith( 1245 computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT)); 1246 // TODO: Account for implicit trunc. 1247 bool SignKnownZero = Known.isNonNegative(); 1248 bool SignKnownOne = Known.isNegative(); 1249 AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) || 1250 (SignKnownOne && Scale.isNonPositive()); 1251 AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) || 1252 (SignKnownOne && Scale.isNonNegative()); 1253 } 1254 } 1255 1256 // We now have accesses at two offsets from the same base: 1257 // 1. (...)*GCD + DecompGEP1.Offset with size V1Size 1258 // 2. 0 with size V2Size 1259 // Using arithmetic modulo GCD, the accesses are at 1260 // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits 1261 // into the range [V2Size..GCD), then we know they cannot overlap. 1262 APInt ModOffset = DecompGEP1.Offset.srem(GCD); 1263 if (ModOffset.isNegative()) 1264 ModOffset += GCD; // We want mod, not rem. 1265 if (V1Size.hasValue() && V2Size.hasValue() && 1266 ModOffset.uge(V2Size.getValue()) && 1267 (GCD - ModOffset).uge(V1Size.getValue())) 1268 return AliasResult::NoAlias; 1269 1270 // If we know all the variables are non-negative, then the total offset is 1271 // also non-negative and >= DecompGEP1.Offset. We have the following layout: 1272 // [0, V2Size) ... [TotalOffset, TotalOffer+V1Size] 1273 // If DecompGEP1.Offset >= V2Size, the accesses don't alias. 1274 if (AllNonNegative && V2Size.hasValue() && 1275 DecompGEP1.Offset.uge(V2Size.getValue())) 1276 return AliasResult::NoAlias; 1277 // Similarly, if the variables are non-positive, then the total offset is 1278 // also non-positive and <= DecompGEP1.Offset. We have the following layout: 1279 // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size) 1280 // If -DecompGEP1.Offset >= V1Size, the accesses don't alias. 1281 if (AllNonPositive && V1Size.hasValue() && 1282 (-DecompGEP1.Offset).uge(V1Size.getValue())) 1283 return AliasResult::NoAlias; 1284 1285 if (V1Size.hasValue() && V2Size.hasValue()) { 1286 // Try to determine the range of values for VarIndex. 1287 // VarIndexRange is such that: 1288 // (VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex) && 1289 // VarIndexRange.contains(VarIndex) 1290 Optional<APInt> MinAbsVarIndex; 1291 Optional<ConstantRange> VarIndexRange; 1292 if (DecompGEP1.VarIndices.size() == 1) { 1293 // VarIndex = Scale*V. 1294 const VariableGEPIndex &Var = DecompGEP1.VarIndices[0]; 1295 if (isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) { 1296 // If V != 0 then abs(VarIndex) >= abs(Scale). 1297 MinAbsVarIndex = Var.Scale.abs(); 1298 } 1299 ConstantRange R = Var.Val.evaluateWith( 1300 computeConstantRange(Var.Val.V, true, &AC, Var.CxtI)); 1301 if (!R.isFullSet() && !R.isEmptySet()) 1302 VarIndexRange = R.sextOrTrunc(Var.Scale.getBitWidth()) 1303 .multiply(ConstantRange(Var.Scale)); 1304 } else if (DecompGEP1.VarIndices.size() == 2) { 1305 // VarIndex = Scale*V0 + (-Scale)*V1. 1306 // If V0 != V1 then abs(VarIndex) >= abs(Scale). 1307 // Check that VisitedPhiBBs is empty, to avoid reasoning about 1308 // inequality of values across loop iterations. 1309 const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0]; 1310 const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1]; 1311 if (Var0.Scale == -Var1.Scale && 1312 Var0.Val.hasSameExtensionsAs(Var1.Val) && VisitedPhiBBs.empty() && 1313 isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr, 1314 DT)) 1315 MinAbsVarIndex = Var0.Scale.abs(); 1316 } 1317 1318 if (MinAbsVarIndex) { 1319 // The constant offset will have added at least +/-MinAbsVarIndex to it. 1320 APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex; 1321 APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex; 1322 // We know that Offset <= OffsetLo || Offset >= OffsetHi 1323 if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) && 1324 OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue())) 1325 return AliasResult::NoAlias; 1326 } 1327 1328 if (VarIndexRange) { 1329 ConstantRange OffsetRange = 1330 VarIndexRange->add(ConstantRange(DecompGEP1.Offset)); 1331 1332 // We know that Offset >= MinOffset. 1333 // (MinOffset >= V2Size) => (Offset >= V2Size) => NoAlias. 1334 if (OffsetRange.getSignedMin().sge(V2Size.getValue())) 1335 return AliasResult::NoAlias; 1336 1337 // We know that Offset <= MaxOffset. 1338 // (MaxOffset <= -V1Size) => (Offset <= -V1Size) => NoAlias. 1339 if (OffsetRange.getSignedMax().sle(-V1Size.getValue())) 1340 return AliasResult::NoAlias; 1341 } 1342 } 1343 1344 if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT)) 1345 return AliasResult::NoAlias; 1346 } 1347 1348 // Statically, we can see that the base objects are the same, but the 1349 // pointers have dynamic offsets which we can't resolve. And none of our 1350 // little tricks above worked. 1351 return AliasResult::MayAlias; 1352 } 1353 1354 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) { 1355 // If the results agree, take it. 1356 if (A == B) 1357 return A; 1358 // A mix of PartialAlias and MustAlias is PartialAlias. 1359 if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) || 1360 (B == AliasResult::PartialAlias && A == AliasResult::MustAlias)) 1361 return AliasResult::PartialAlias; 1362 // Otherwise, we don't know anything. 1363 return AliasResult::MayAlias; 1364 } 1365 1366 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction 1367 /// against another. 1368 AliasResult 1369 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize, 1370 const Value *V2, LocationSize V2Size, 1371 AAQueryInfo &AAQI) { 1372 // If the values are Selects with the same condition, we can do a more precise 1373 // check: just check for aliases between the values on corresponding arms. 1374 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) 1375 if (SI->getCondition() == SI2->getCondition()) { 1376 AliasResult Alias = getBestAAResults().alias( 1377 MemoryLocation(SI->getTrueValue(), SISize), 1378 MemoryLocation(SI2->getTrueValue(), V2Size), AAQI); 1379 if (Alias == AliasResult::MayAlias) 1380 return AliasResult::MayAlias; 1381 AliasResult ThisAlias = getBestAAResults().alias( 1382 MemoryLocation(SI->getFalseValue(), SISize), 1383 MemoryLocation(SI2->getFalseValue(), V2Size), AAQI); 1384 return MergeAliasResults(ThisAlias, Alias); 1385 } 1386 1387 // If both arms of the Select node NoAlias or MustAlias V2, then returns 1388 // NoAlias / MustAlias. Otherwise, returns MayAlias. 1389 AliasResult Alias = getBestAAResults().alias( 1390 MemoryLocation(V2, V2Size), 1391 MemoryLocation(SI->getTrueValue(), SISize), AAQI); 1392 if (Alias == AliasResult::MayAlias) 1393 return AliasResult::MayAlias; 1394 1395 AliasResult ThisAlias = getBestAAResults().alias( 1396 MemoryLocation(V2, V2Size), 1397 MemoryLocation(SI->getFalseValue(), SISize), AAQI); 1398 return MergeAliasResults(ThisAlias, Alias); 1399 } 1400 1401 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against 1402 /// another. 1403 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize, 1404 const Value *V2, LocationSize V2Size, 1405 AAQueryInfo &AAQI) { 1406 if (!PN->getNumIncomingValues()) 1407 return AliasResult::NoAlias; 1408 // If the values are PHIs in the same block, we can do a more precise 1409 // as well as efficient check: just check for aliases between the values 1410 // on corresponding edges. 1411 if (const PHINode *PN2 = dyn_cast<PHINode>(V2)) 1412 if (PN2->getParent() == PN->getParent()) { 1413 Optional<AliasResult> Alias; 1414 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 1415 AliasResult ThisAlias = getBestAAResults().alias( 1416 MemoryLocation(PN->getIncomingValue(i), PNSize), 1417 MemoryLocation( 1418 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size), 1419 AAQI); 1420 if (Alias) 1421 *Alias = MergeAliasResults(*Alias, ThisAlias); 1422 else 1423 Alias = ThisAlias; 1424 if (*Alias == AliasResult::MayAlias) 1425 break; 1426 } 1427 return *Alias; 1428 } 1429 1430 SmallVector<Value *, 4> V1Srcs; 1431 // If a phi operand recurses back to the phi, we can still determine NoAlias 1432 // if we don't alias the underlying objects of the other phi operands, as we 1433 // know that the recursive phi needs to be based on them in some way. 1434 bool isRecursive = false; 1435 auto CheckForRecPhi = [&](Value *PV) { 1436 if (!EnableRecPhiAnalysis) 1437 return false; 1438 if (getUnderlyingObject(PV) == PN) { 1439 isRecursive = true; 1440 return true; 1441 } 1442 return false; 1443 }; 1444 1445 if (PV) { 1446 // If we have PhiValues then use it to get the underlying phi values. 1447 const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN); 1448 // If we have more phi values than the search depth then return MayAlias 1449 // conservatively to avoid compile time explosion. The worst possible case 1450 // is if both sides are PHI nodes. In which case, this is O(m x n) time 1451 // where 'm' and 'n' are the number of PHI sources. 1452 if (PhiValueSet.size() > MaxLookupSearchDepth) 1453 return AliasResult::MayAlias; 1454 // Add the values to V1Srcs 1455 for (Value *PV1 : PhiValueSet) { 1456 if (CheckForRecPhi(PV1)) 1457 continue; 1458 V1Srcs.push_back(PV1); 1459 } 1460 } else { 1461 // If we don't have PhiInfo then just look at the operands of the phi itself 1462 // FIXME: Remove this once we can guarantee that we have PhiInfo always 1463 SmallPtrSet<Value *, 4> UniqueSrc; 1464 Value *OnePhi = nullptr; 1465 for (Value *PV1 : PN->incoming_values()) { 1466 if (isa<PHINode>(PV1)) { 1467 if (OnePhi && OnePhi != PV1) { 1468 // To control potential compile time explosion, we choose to be 1469 // conserviate when we have more than one Phi input. It is important 1470 // that we handle the single phi case as that lets us handle LCSSA 1471 // phi nodes and (combined with the recursive phi handling) simple 1472 // pointer induction variable patterns. 1473 return AliasResult::MayAlias; 1474 } 1475 OnePhi = PV1; 1476 } 1477 1478 if (CheckForRecPhi(PV1)) 1479 continue; 1480 1481 if (UniqueSrc.insert(PV1).second) 1482 V1Srcs.push_back(PV1); 1483 } 1484 1485 if (OnePhi && UniqueSrc.size() > 1) 1486 // Out of an abundance of caution, allow only the trivial lcssa and 1487 // recursive phi cases. 1488 return AliasResult::MayAlias; 1489 } 1490 1491 // If V1Srcs is empty then that means that the phi has no underlying non-phi 1492 // value. This should only be possible in blocks unreachable from the entry 1493 // block, but return MayAlias just in case. 1494 if (V1Srcs.empty()) 1495 return AliasResult::MayAlias; 1496 1497 // If this PHI node is recursive, indicate that the pointer may be moved 1498 // across iterations. We can only prove NoAlias if different underlying 1499 // objects are involved. 1500 if (isRecursive) 1501 PNSize = LocationSize::beforeOrAfterPointer(); 1502 1503 // In the recursive alias queries below, we may compare values from two 1504 // different loop iterations. Keep track of visited phi blocks, which will 1505 // be used when determining value equivalence. 1506 bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second; 1507 auto _ = make_scope_exit([&]() { 1508 if (BlockInserted) 1509 VisitedPhiBBs.erase(PN->getParent()); 1510 }); 1511 1512 // If we inserted a block into VisitedPhiBBs, alias analysis results that 1513 // have been cached earlier may no longer be valid. Perform recursive queries 1514 // with a new AAQueryInfo. 1515 AAQueryInfo NewAAQI = AAQI.withEmptyCache(); 1516 AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI; 1517 1518 AliasResult Alias = getBestAAResults().alias( 1519 MemoryLocation(V2, V2Size), 1520 MemoryLocation(V1Srcs[0], PNSize), *UseAAQI); 1521 1522 // Early exit if the check of the first PHI source against V2 is MayAlias. 1523 // Other results are not possible. 1524 if (Alias == AliasResult::MayAlias) 1525 return AliasResult::MayAlias; 1526 // With recursive phis we cannot guarantee that MustAlias/PartialAlias will 1527 // remain valid to all elements and needs to conservatively return MayAlias. 1528 if (isRecursive && Alias != AliasResult::NoAlias) 1529 return AliasResult::MayAlias; 1530 1531 // If all sources of the PHI node NoAlias or MustAlias V2, then returns 1532 // NoAlias / MustAlias. Otherwise, returns MayAlias. 1533 for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) { 1534 Value *V = V1Srcs[i]; 1535 1536 AliasResult ThisAlias = getBestAAResults().alias( 1537 MemoryLocation(V2, V2Size), MemoryLocation(V, PNSize), *UseAAQI); 1538 Alias = MergeAliasResults(ThisAlias, Alias); 1539 if (Alias == AliasResult::MayAlias) 1540 break; 1541 } 1542 1543 return Alias; 1544 } 1545 1546 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as 1547 /// array references. 1548 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size, 1549 const Value *V2, LocationSize V2Size, 1550 AAQueryInfo &AAQI) { 1551 // If either of the memory references is empty, it doesn't matter what the 1552 // pointer values are. 1553 if (V1Size.isZero() || V2Size.isZero()) 1554 return AliasResult::NoAlias; 1555 1556 // Strip off any casts if they exist. 1557 V1 = V1->stripPointerCastsForAliasAnalysis(); 1558 V2 = V2->stripPointerCastsForAliasAnalysis(); 1559 1560 // If V1 or V2 is undef, the result is NoAlias because we can always pick a 1561 // value for undef that aliases nothing in the program. 1562 if (isa<UndefValue>(V1) || isa<UndefValue>(V2)) 1563 return AliasResult::NoAlias; 1564 1565 // Are we checking for alias of the same value? 1566 // Because we look 'through' phi nodes, we could look at "Value" pointers from 1567 // different iterations. We must therefore make sure that this is not the 1568 // case. The function isValueEqualInPotentialCycles ensures that this cannot 1569 // happen by looking at the visited phi nodes and making sure they cannot 1570 // reach the value. 1571 if (isValueEqualInPotentialCycles(V1, V2)) 1572 return AliasResult::MustAlias; 1573 1574 if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy()) 1575 return AliasResult::NoAlias; // Scalars cannot alias each other 1576 1577 // Figure out what objects these things are pointing to if we can. 1578 const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth); 1579 const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth); 1580 1581 // Null values in the default address space don't point to any object, so they 1582 // don't alias any other pointer. 1583 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1)) 1584 if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace())) 1585 return AliasResult::NoAlias; 1586 if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2)) 1587 if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace())) 1588 return AliasResult::NoAlias; 1589 1590 if (O1 != O2) { 1591 // If V1/V2 point to two different objects, we know that we have no alias. 1592 if (isIdentifiedObject(O1) && isIdentifiedObject(O2)) 1593 return AliasResult::NoAlias; 1594 1595 // Constant pointers can't alias with non-const isIdentifiedObject objects. 1596 if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) || 1597 (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1))) 1598 return AliasResult::NoAlias; 1599 1600 // Function arguments can't alias with things that are known to be 1601 // unambigously identified at the function level. 1602 if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) || 1603 (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1))) 1604 return AliasResult::NoAlias; 1605 1606 // If one pointer is the result of a call/invoke or load and the other is a 1607 // non-escaping local object within the same function, then we know the 1608 // object couldn't escape to a point where the call could return it. 1609 // 1610 // Note that if the pointers are in different functions, there are a 1611 // variety of complications. A call with a nocapture argument may still 1612 // temporary store the nocapture argument's value in a temporary memory 1613 // location if that memory location doesn't escape. Or it may pass a 1614 // nocapture value to other functions as long as they don't capture it. 1615 if (isEscapeSource(O1) && 1616 AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1))) 1617 return AliasResult::NoAlias; 1618 if (isEscapeSource(O2) && 1619 AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2))) 1620 return AliasResult::NoAlias; 1621 } 1622 1623 // If the size of one access is larger than the entire object on the other 1624 // side, then we know such behavior is undefined and can assume no alias. 1625 bool NullIsValidLocation = NullPointerIsDefined(&F); 1626 if ((isObjectSmallerThan( 1627 O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL, 1628 TLI, NullIsValidLocation)) || 1629 (isObjectSmallerThan( 1630 O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL, 1631 TLI, NullIsValidLocation))) 1632 return AliasResult::NoAlias; 1633 1634 // If one the accesses may be before the accessed pointer, canonicalize this 1635 // by using unknown after-pointer sizes for both accesses. This is 1636 // equivalent, because regardless of which pointer is lower, one of them 1637 // will always came after the other, as long as the underlying objects aren't 1638 // disjoint. We do this so that the rest of BasicAA does not have to deal 1639 // with accesses before the base pointer, and to improve cache utilization by 1640 // merging equivalent states. 1641 if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) { 1642 V1Size = LocationSize::afterPointer(); 1643 V2Size = LocationSize::afterPointer(); 1644 } 1645 1646 // FIXME: If this depth limit is hit, then we may cache sub-optimal results 1647 // for recursive queries. For this reason, this limit is chosen to be large 1648 // enough to be very rarely hit, while still being small enough to avoid 1649 // stack overflows. 1650 if (AAQI.Depth >= 512) 1651 return AliasResult::MayAlias; 1652 1653 // Check the cache before climbing up use-def chains. This also terminates 1654 // otherwise infinitely recursive queries. 1655 AAQueryInfo::LocPair Locs({V1, V1Size}, {V2, V2Size}); 1656 const bool Swapped = V1 > V2; 1657 if (Swapped) 1658 std::swap(Locs.first, Locs.second); 1659 const auto &Pair = AAQI.AliasCache.try_emplace( 1660 Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0}); 1661 if (!Pair.second) { 1662 auto &Entry = Pair.first->second; 1663 if (!Entry.isDefinitive()) { 1664 // Remember that we used an assumption. 1665 ++Entry.NumAssumptionUses; 1666 ++AAQI.NumAssumptionUses; 1667 } 1668 // Cache contains sorted {V1,V2} pairs but we should return original order. 1669 auto Result = Entry.Result; 1670 Result.swap(Swapped); 1671 return Result; 1672 } 1673 1674 int OrigNumAssumptionUses = AAQI.NumAssumptionUses; 1675 unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size(); 1676 AliasResult Result = 1677 aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2); 1678 1679 auto It = AAQI.AliasCache.find(Locs); 1680 assert(It != AAQI.AliasCache.end() && "Must be in cache"); 1681 auto &Entry = It->second; 1682 1683 // Check whether a NoAlias assumption has been used, but disproven. 1684 bool AssumptionDisproven = 1685 Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias; 1686 if (AssumptionDisproven) 1687 Result = AliasResult::MayAlias; 1688 1689 // This is a definitive result now, when considered as a root query. 1690 AAQI.NumAssumptionUses -= Entry.NumAssumptionUses; 1691 Entry.Result = Result; 1692 // Cache contains sorted {V1,V2} pairs. 1693 Entry.Result.swap(Swapped); 1694 Entry.NumAssumptionUses = -1; 1695 1696 // If the assumption has been disproven, remove any results that may have 1697 // been based on this assumption. Do this after the Entry updates above to 1698 // avoid iterator invalidation. 1699 if (AssumptionDisproven) 1700 while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults) 1701 AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val()); 1702 1703 // The result may still be based on assumptions higher up in the chain. 1704 // Remember it, so it can be purged from the cache later. 1705 if (OrigNumAssumptionUses != AAQI.NumAssumptionUses && 1706 Result != AliasResult::MayAlias) 1707 AAQI.AssumptionBasedResults.push_back(Locs); 1708 return Result; 1709 } 1710 1711 AliasResult BasicAAResult::aliasCheckRecursive( 1712 const Value *V1, LocationSize V1Size, 1713 const Value *V2, LocationSize V2Size, 1714 AAQueryInfo &AAQI, const Value *O1, const Value *O2) { 1715 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) { 1716 AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI); 1717 if (Result != AliasResult::MayAlias) 1718 return Result; 1719 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) { 1720 AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI); 1721 if (Result != AliasResult::MayAlias) 1722 return Result; 1723 } 1724 1725 if (const PHINode *PN = dyn_cast<PHINode>(V1)) { 1726 AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI); 1727 if (Result != AliasResult::MayAlias) 1728 return Result; 1729 } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) { 1730 AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI); 1731 if (Result != AliasResult::MayAlias) 1732 return Result; 1733 } 1734 1735 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) { 1736 AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI); 1737 if (Result != AliasResult::MayAlias) 1738 return Result; 1739 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) { 1740 AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI); 1741 if (Result != AliasResult::MayAlias) 1742 return Result; 1743 } 1744 1745 // If both pointers are pointing into the same object and one of them 1746 // accesses the entire object, then the accesses must overlap in some way. 1747 if (O1 == O2) { 1748 bool NullIsValidLocation = NullPointerIsDefined(&F); 1749 if (V1Size.isPrecise() && V2Size.isPrecise() && 1750 (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) || 1751 isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) 1752 return AliasResult::PartialAlias; 1753 } 1754 1755 return AliasResult::MayAlias; 1756 } 1757 1758 /// Check whether two Values can be considered equivalent. 1759 /// 1760 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether 1761 /// they can not be part of a cycle in the value graph by looking at all 1762 /// visited phi nodes an making sure that the phis cannot reach the value. We 1763 /// have to do this because we are looking through phi nodes (That is we say 1764 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB). 1765 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V, 1766 const Value *V2) { 1767 if (V != V2) 1768 return false; 1769 1770 const Instruction *Inst = dyn_cast<Instruction>(V); 1771 if (!Inst) 1772 return true; 1773 1774 if (VisitedPhiBBs.empty()) 1775 return true; 1776 1777 if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck) 1778 return false; 1779 1780 // Make sure that the visited phis cannot reach the Value. This ensures that 1781 // the Values cannot come from different iterations of a potential cycle the 1782 // phi nodes could be involved in. 1783 for (auto *P : VisitedPhiBBs) 1784 if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT)) 1785 return false; 1786 1787 return true; 1788 } 1789 1790 /// Computes the symbolic difference between two de-composed GEPs. 1791 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP, 1792 const DecomposedGEP &SrcGEP) { 1793 DestGEP.Offset -= SrcGEP.Offset; 1794 for (const VariableGEPIndex &Src : SrcGEP.VarIndices) { 1795 // Find V in Dest. This is N^2, but pointer indices almost never have more 1796 // than a few variable indexes. 1797 bool Found = false; 1798 for (auto I : enumerate(DestGEP.VarIndices)) { 1799 VariableGEPIndex &Dest = I.value(); 1800 if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V) || 1801 !Dest.Val.hasSameExtensionsAs(Src.Val)) 1802 continue; 1803 1804 // If we found it, subtract off Scale V's from the entry in Dest. If it 1805 // goes to zero, remove the entry. 1806 if (Dest.Scale != Src.Scale) { 1807 Dest.Scale -= Src.Scale; 1808 Dest.IsNSW = false; 1809 } else { 1810 DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index()); 1811 } 1812 Found = true; 1813 break; 1814 } 1815 1816 // If we didn't consume this entry, add it to the end of the Dest list. 1817 if (!Found) { 1818 VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW}; 1819 DestGEP.VarIndices.push_back(Entry); 1820 } 1821 } 1822 } 1823 1824 bool BasicAAResult::constantOffsetHeuristic( 1825 const DecomposedGEP &GEP, LocationSize MaybeV1Size, 1826 LocationSize MaybeV2Size, AssumptionCache *AC, DominatorTree *DT) { 1827 if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() || 1828 !MaybeV2Size.hasValue()) 1829 return false; 1830 1831 const uint64_t V1Size = MaybeV1Size.getValue(); 1832 const uint64_t V2Size = MaybeV2Size.getValue(); 1833 1834 const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1]; 1835 1836 if (!Var0.Val.hasSameExtensionsAs(Var1.Val) || Var0.Scale != -Var1.Scale || 1837 Var0.Val.V->getType() != Var1.Val.V->getType()) 1838 return false; 1839 1840 // We'll strip off the Extensions of Var0 and Var1 and do another round 1841 // of GetLinearExpression decomposition. In the example above, if Var0 1842 // is zext(%x + 1) we should get V1 == %x and V1Offset == 1. 1843 1844 LinearExpression E0 = 1845 GetLinearExpression(ExtendedValue(Var0.Val.V), DL, 0, AC, DT); 1846 LinearExpression E1 = 1847 GetLinearExpression(ExtendedValue(Var1.Val.V), DL, 0, AC, DT); 1848 if (E0.Scale != E1.Scale || !E0.Val.hasSameExtensionsAs(E1.Val) || 1849 !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V)) 1850 return false; 1851 1852 // We have a hit - Var0 and Var1 only differ by a constant offset! 1853 1854 // If we've been sext'ed then zext'd the maximum difference between Var0 and 1855 // Var1 is possible to calculate, but we're just interested in the absolute 1856 // minimum difference between the two. The minimum distance may occur due to 1857 // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so 1858 // the minimum distance between %i and %i + 5 is 3. 1859 APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff; 1860 MinDiff = APIntOps::umin(MinDiff, Wrapped); 1861 APInt MinDiffBytes = 1862 MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs(); 1863 1864 // We can't definitely say whether GEP1 is before or after V2 due to wrapping 1865 // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other 1866 // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and 1867 // V2Size can fit in the MinDiffBytes gap. 1868 return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) && 1869 MinDiffBytes.uge(V2Size + GEP.Offset.abs()); 1870 } 1871 1872 //===----------------------------------------------------------------------===// 1873 // BasicAliasAnalysis Pass 1874 //===----------------------------------------------------------------------===// 1875 1876 AnalysisKey BasicAA::Key; 1877 1878 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) { 1879 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 1880 auto &AC = AM.getResult<AssumptionAnalysis>(F); 1881 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 1882 auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F); 1883 return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV); 1884 } 1885 1886 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) { 1887 initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry()); 1888 } 1889 1890 char BasicAAWrapperPass::ID = 0; 1891 1892 void BasicAAWrapperPass::anchor() {} 1893 1894 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa", 1895 "Basic Alias Analysis (stateless AA impl)", true, true) 1896 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 1897 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1898 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 1899 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass) 1900 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa", 1901 "Basic Alias Analysis (stateless AA impl)", true, true) 1902 1903 FunctionPass *llvm::createBasicAAWrapperPass() { 1904 return new BasicAAWrapperPass(); 1905 } 1906 1907 bool BasicAAWrapperPass::runOnFunction(Function &F) { 1908 auto &ACT = getAnalysis<AssumptionCacheTracker>(); 1909 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>(); 1910 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>(); 1911 auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>(); 1912 1913 Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, 1914 TLIWP.getTLI(F), ACT.getAssumptionCache(F), 1915 &DTWP.getDomTree(), 1916 PVWP ? &PVWP->getResult() : nullptr)); 1917 1918 return false; 1919 } 1920 1921 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 1922 AU.setPreservesAll(); 1923 AU.addRequiredTransitive<AssumptionCacheTracker>(); 1924 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 1925 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 1926 AU.addUsedIfAvailable<PhiValuesWrapperPass>(); 1927 } 1928 1929 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) { 1930 return BasicAAResult( 1931 F.getParent()->getDataLayout(), F, 1932 P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 1933 P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F)); 1934 } 1935