//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This will allow us to more-aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C) which is
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basicaa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basicaa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(
    const Value *V,
    SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr) {
  SmallDenseMap<const Value *, bool, 8>::iterator CacheIt;
  if (IsCapturedCache) {
    bool Inserted;
    std::tie(CacheIt, Inserted) = IsCapturedCache->insert({V, false});
    if (!Inserted)
      // Found cached result, return it!
      return CacheIt->second;
  }

  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V)) {
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
    if (IsCapturedCache)
      CacheIt->second = Ret;
    return Ret;
  }

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
      if (IsCapturedCache)
        CacheIt->second = Ret;
      return Ret;
    }

  return false;
}

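// For example (an illustrative sketch, not from the original comments): an
// alloca whose address is only passed to nocapture call arguments remains a
// non-escaping local object, while one stored to a global escapes, since
// stores count as captures here:
//   %p = alloca i32              ; local object
//   store i32* %p, i32** @g      ; %p is captured -> escapes
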
/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We opt for the second option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull;
  uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed, and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
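///
/// For example (an illustrative sketch, not tied to any particular caller):
/// given
///   %a = shl i64 %x, 2
///   %b = add i64 %a, 12
/// decomposing %b yields V = %x with Scale = 4 and Offset = 12, i.e.
/// %b == 4*%x + 12.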
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the
    // variable. If we've been called recursively, the Offset bit width will be
    // greater than the constant's (the Offset's always as wide as the
    // outermost call), so we'll zext here and process any extension in the
    // isa<SExtInst> & isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
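        // For instance (illustrative): if %x is known to be a multiple of 4,
        // its two low bits are zero, so
        //   %y = or i64 %x, 3
        // computes exactly %x + 3 and can be decomposed as such. If the mask
        // check below fails, we conservatively stop decomposing.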
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

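  // For example (an illustrative sketch): given
  //   %a = add nsw i32 %x, 5
  //   %e = sext i32 %a to i64
  // the nsw flag allows decomposing %e as sext(%x) + 5 with SExtBits = 32.
  // Without nsw, %x + 5 may have signed-wrapped, so sext(%x + 5) need not
  // equal sext(%x) + 5 and we must treat %a as opaque.
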
  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c).
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32-bit pointers with negative indices that
/// rely on two's complement wrap-arounds for precise alias information, where
/// the maximum pointer size is 64 bits.
static APInt adjustToPointerSize(APInt Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
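///
/// For example (an illustrative sketch): given
///   %p = getelementptr { i32, [8 x i32] }, { i32, [8 x i32] }* %base,
///                      i64 0, i32 1, i64 %i
/// the decomposed form is roughly Base = %base, StructOffset = 4 (the byte
/// offset of field 1), OtherOffset = 0, and a single variable index
/// { V = %i, Scale = 4 }.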
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // the attributes but return an aliasing pointer. Because some analyses
        // may assume that a nocapture pointer is not returned from a special
        // intrinsic (the function would otherwise have to be marked with the
        // returned attribute), it is crucial to use this function so that we
        // stay in sync with CaptureTracking. Not using it may cause weird
        // miscompilations where two aliasing pointers are assumed to be
        // noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    Type *SrcEleTy = GEPOp->getSourceElementType();
    if (SrcEleTy->isVectorTy() && cast<VectorType>(SrcEleTy)->isScalable()) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
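      // For instance (illustrative): a constant index of 3 into an array of
      // i32 contributes 3 * 4 = 12 bytes to the constant offset below, while
      // a variable index %i contributes a { V = %i, Scale = 4 } entry.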
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            (DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
             CIdx->getValue().sextOrSelf(MaxPointerSize))
                .sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize*2) *
                               Scale.sext(MaxPointerSize*2);
      if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        Decomposed.OtherOffset +=
            IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the call site knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}
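
// As a rough illustration (an assumption for exposition, not from the
// original comments): a call site that is both readonly and argmemonly
// starts at FMRB_OnlyReadsMemory and is then intersected with
// FMRB_OnlyAccessesArgumentPointees; each successive attribute can only
// narrow Min, never widen it.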

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
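  // For example (illustrative): given
  //   declare void @f(i8* writeonly, i8* readonly)
  // the checks below return ModRefInfo::Mod for argument 0 and
  // ModRefInfo::Ref for argument 1.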
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocA, LocB));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocB, LocA));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags, AAQI);

  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified call site can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run.
  // However, a tail call may use an alloca with byval. Calling with byval
  // copies the contents of the alloca into argument registers or stack slots,
  // so there is no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
                                                MemoryLocation(Object), AAQI);
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more
      // aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object', but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset the Must bit. It is added back below if at
    // least one operand aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
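    // E.g. (illustrative): for
    //   %m = call i8* @malloc(i64 16)
    // a location based on a pointer that predates the call cannot overlap
    // the fresh allocation, so the call neither reads nor writes it.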
    if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc, AAQI)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc, AAQI)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // the rules of invariant.start) and print 40, while the first program
  // always prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2) {
    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
    if (C1->getValue().sextOrSelf(BitWidth) ==
        C2->getValue().sextOrSelf(BitWidth))
      return MayAlias;
  }

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
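
  // As an illustrative sketch of the cases this is building toward: given
  //   %f0 = getelementptr %T, %T* %p, i64 %i, i32 0
  //   %f1 = getelementptr %T, %T* %p, i64 %j, i32 1
  // the last (struct) indices are distinct constants, so if all earlier
  // indices step through arrays and the field extents don't overlap, the
  // checks below conclude NoAlias even though %i and %j are unknown.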
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or vector);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    Type *LastElementTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
    const uint64_t ElementSize =
        DL.getTypeStoreSize(LastElementTy).getFixedSize();
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
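        // For instance (illustrative): if one index is known even (low bit
        // zero) and the other known odd (low bit one), the known bits below
        // conflict, so the indices can never be equal and we return NoAlias.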
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  if (C1->getValue().getActiveBits() > 64 ||
      C2->getValue().getActiveBits() > 64)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, then the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
// struct { int f0, int f1, ...} foo;
// foo alloca;
// foo* random = bar(alloca);
// int *f0 = &alloca.f0
// int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
// %alloca = alloca %struct.foo
// %random = call %struct.foo* @random(%struct.foo* %alloca)
// %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
// %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of
// %alloca, the highest %f1 can be is (%alloca + 3). This means %random cannot
// be higher than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  APInt ObjectBaseOffset = DecompObject.StructOffset +
                           DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset from the
  // base and can use it. If the GEP has variable indices, we can't get the
  // exact GEP offset to identify a pointer alias, so return false in that
  // case.
  if (!DecompGEP.VarIndices.empty())
    return false;

  APInt GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  unsigned MaxPointerSize = getMaxPointerSize(DL);
  DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP1.HasCompileTimeConstantScale =
      DecompGEP2.HasCompileTimeConstantScale = true;

  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if the index scale is not a
  // compile-time constant.
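  // E.g. (illustrative): a GEP over a scalable vector, such as
  //   getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %p, i64 0, i64 %i
  // has an element stride known only at run time, so no constant-scale
  // decomposition exists and we conservatively return MayAlias below.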
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return MayAlias;

  APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
                   UnderlyingV2, LocationSize::unknown(), AAMDNodes(), AAQI);

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(
          UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP off of the other pointer with a
    // non-zero index, we know they cannot alias.

    // If both accesses have unknown size, we can't do anything useful here.
    if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, LocationSize::unknown(),
                               AAMDNodes(), V2, LocationSize::unknown(),
                               V2AAInfo, AAQI, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done
      // through a pointer value associated with an address range of the
      // memory access, otherwise the behavior is undefined."
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset.sge(0)) {
      if (V2Size != LocationSize::unknown()) {
        if (GEP1BaseOffset.ult(V2Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      //  +                +
      //  | BaseOffset     |
      //  ---------------->|
      //  |-->V1Size       |-------> V2Size
      //  GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with a negative index ('gep <ptr>, -1, ...').
      if (V1Size != LocationSize::unknown() &&
          V2Size != LocationSize::unknown()) {
        if ((-GEP1BaseOffset).ult(V1Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt Modulo(MaxPointerSize, 0);
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // only be keeping the least significant set bit of Modulo later.
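      // (Worked illustration with made-up scales: if every scale is a
      // multiple of 4, the variable part of the pointer difference is also a
      // multiple of 4, so the difference mod 4 is determined entirely by the
      // constant offsets.)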
      Modulo |= DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known =
            computeKnownBits(V, DL, 0, &AC, dyn_cast<Instruction>(GEP1), DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        APInt Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
    if (V1Size != LocationSize::unknown() &&
        V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
        (Modulo - ModOffset).uge(V1Size.getValue()))
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset.sgt(0) &&
        V2Size != LocationSize::unknown() &&
        GEP1BaseOffset.uge(V2Size.getValue()))
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const AAMDNodes &SIAAInfo, const Value *V2,
                           LocationSize V2Size, const AAMDNodes &V2AAInfo,
                           const Value *UnderV2, AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
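  // (Illustrative sketch with hypothetical IR:
  //   %a = select i1 %c, i8* %p, i8* %q
  //   %b = select i1 %c, i8* %r, i8* %s
  // Both selects take the same arm at run time, so it suffices to compare
  // %p against %r and %q against %s.)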
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
                     V2Size, V2AAInfo, AAQI);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                                 SISize, SIAAInfo, AAQI, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
                                     SISize, SIAAInfo, AAQI, UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2, AAQueryInfo &AAQI) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise as
  // well as more efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                                MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      AliasResult OrigAliasResult;
      {
        // Limited lifetime iterator, invalidated by the aliasCheck call below.
        auto CacheIt = AAQI.AliasCache.find(Locs);
        assert((CacheIt != AAQI.AliasCache.end()) &&
               "There must exist an entry for the phi node");
        OrigAliasResult = CacheIt->second;
        CacheIt->second = NoAlias;
      }

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo, AAQI);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias) {
        auto Pair =
            AAQI.AliasCache.insert(std::make_pair(Locs, OrigAliasResult));
        assert(!Pair.second && "Entry must have existed");
        Pair.first->second = OrigAliasResult;
      }
      return Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
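    // (Illustrative: for %p = phi i8* [ %a, ... ], [ %q, ... ] where %q is
    // itself a phi of %b and %c, PhiValues reports the transitive non-phi
    // set {%a, %b, %c}.)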
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile-time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time,
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (EnableRecPhiAnalysis) {
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }
      }
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we always have PhiValues.
    SmallPtrSet<Value *, 4> UniqueSrc;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1))
        // If any of the sources is itself a PHI, return MayAlias
        // conservatively to avoid compile-time explosion. The worst possible
        // case is if both sides are PHI nodes, in which case this is
        // O(m x n) time, where 'm' and 'n' are the number of PHI sources.
        return MayAlias;

      if (EnableRecPhiAnalysis)
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }
  }

  // If V1Srcs is empty then the phi has no underlying non-phi value. This
  // should only be possible in blocks unreachable from the entry block, but
  // return MayAlias just in case.
  if (V1Srcs.empty())
    return MayAlias;

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = LocationSize::unknown();

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
                                 PNAAInfo, AAQI, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, AAQI, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      LocationSize V2Size, AAMDNodes V2AAInfo,
                                      AAQueryInfo &AAQI, const Value *O1,
                                      const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
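    // For instance (an illustrative case): a pointer loaded from memory or
    // returned by a call cannot point at an alloca whose address was never
    // stored or otherwise passed out of the function.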
1819 // 1820 // Note that if the pointers are in different functions, there are a 1821 // variety of complications. A call with a nocapture argument may still 1822 // temporary store the nocapture argument's value in a temporary memory 1823 // location if that memory location doesn't escape. Or it may pass a 1824 // nocapture value to other functions as long as they don't capture it. 1825 if (isEscapeSource(O1) && 1826 isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache)) 1827 return NoAlias; 1828 if (isEscapeSource(O2) && 1829 isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache)) 1830 return NoAlias; 1831 } 1832 1833 // If the size of one access is larger than the entire object on the other 1834 // side, then we know such behavior is undefined and can assume no alias. 1835 bool NullIsValidLocation = NullPointerIsDefined(&F); 1836 if ((isObjectSmallerThan( 1837 O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL, 1838 TLI, NullIsValidLocation)) || 1839 (isObjectSmallerThan( 1840 O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL, 1841 TLI, NullIsValidLocation))) 1842 return NoAlias; 1843 1844 // Check the cache before climbing up use-def chains. This also terminates 1845 // otherwise infinitely recursive queries. 1846 AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo), 1847 MemoryLocation(V2, V2Size, V2AAInfo)); 1848 if (V1 > V2) 1849 std::swap(Locs.first, Locs.second); 1850 std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair = 1851 AAQI.AliasCache.try_emplace(Locs, MayAlias); 1852 if (!Pair.second) 1853 return Pair.first->second; 1854 1855 // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the 1856 // GEP can't simplify, we don't even look at the PHI cases. 1857 if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) { 1858 std::swap(V1, V2); 1859 std::swap(V1Size, V2Size); 1860 std::swap(O1, O2); 1861 std::swap(V1AAInfo, V2AAInfo); 1862 } 1863 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) { 1864 AliasResult Result = 1865 aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI); 1866 if (Result != MayAlias) { 1867 auto ItInsPair = AAQI.AliasCache.insert(std::make_pair(Locs, Result)); 1868 assert(!ItInsPair.second && "Entry must have existed"); 1869 ItInsPair.first->second = Result; 1870 return Result; 1871 } 1872 } 1873 1874 if (isa<PHINode>(V2) && !isa<PHINode>(V1)) { 1875 std::swap(V1, V2); 1876 std::swap(O1, O2); 1877 std::swap(V1Size, V2Size); 1878 std::swap(V1AAInfo, V2AAInfo); 1879 } 1880 if (const PHINode *PN = dyn_cast<PHINode>(V1)) { 1881 AliasResult Result = 1882 aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI); 1883 if (Result != MayAlias) { 1884 Pair = AAQI.AliasCache.try_emplace(Locs, Result); 1885 assert(!Pair.second && "Entry must have existed"); 1886 return Pair.first->second = Result; 1887 } 1888 } 1889 1890 if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) { 1891 std::swap(V1, V2); 1892 std::swap(O1, O2); 1893 std::swap(V1Size, V2Size); 1894 std::swap(V1AAInfo, V2AAInfo); 1895 } 1896 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) { 1897 AliasResult Result = 1898 aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI); 1899 if (Result != MayAlias) { 1900 Pair = AAQI.AliasCache.try_emplace(Locs, Result); 1901 assert(!Pair.second && "Entry must have existed"); 1902 return Pair.first->second = Result; 1903 } 1904 } 1905 1906 // If both pointers are pointing into the same object and one of them 1907 // accesses the entire object, then the accesses must overlap in 
  // some way.
  if (O1 == O2)
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) {
      Pair = AAQI.AliasCache.try_emplace(Locs, PartialAlias);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = PartialAlias;
    }

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
  Pair = AAQI.AliasCache.try_emplace(Locs, Result);
  assert(!Pair.second && "Entry must have existed");
  return Pair.first->second = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value.
/// We have to do this because we are looking through phi nodes (that is, we
/// say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two decomposed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    APInt Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
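    // (Worked illustration with made-up indices: for Dest = {4*%i} and
    // Src = {4*%i, 2*%j}, the matching 4*%i entries cancel and the unmatched
    // 2*%j entry is appended negated, leaving Dest = {-2*%j}.)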
    if (!!Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, APInt BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to
  // wrapping arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and
  // for other values GEP1 > V2). We'll therefore only declare NoAlias if both
  // V1Size and V2Size can fit in the MinDiffBytes gap.
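  // (Worked illustration continuing the i3 example above: offsets 5 and 0
  // give MinDiff = umin(5, 3) = 3; with |Scale| = 4, MinDiffBytes = 12, so
  // NoAlias requires each access to fit in 12 - |BaseOffset| bytes.)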
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F),
                       AM.getCachedResult<PhiValuesAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr,
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}