//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This will allow us to more-aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C) which is
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basicaa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basicaa-double-calc-bits",
                                    cl::Hidden, cl::init(false));
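
// For example (a sketch): on a 32-bit target, C code such as
//   long long i = ...; use(p[i]);
// indexes with an i64 value; doing the decomposition in at least 64 bits
// keeps the wrap-around behavior of such indices precise.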

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. This affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions need to use the same search depth;
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(
    const Value *V,
    SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr) {
  SmallDenseMap<const Value *, bool, 8>::iterator CacheIt;
  if (IsCapturedCache) {
    bool Inserted;
    std::tie(CacheIt, Inserted) = IsCapturedCache->insert({V, false});
    if (!Inserted)
      // Found cached result, return it!
      return CacheIt->second;
  }

  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V)) {
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
    if (IsCapturedCache)
      CacheIt->second = Ret;
    return Ret;
  }

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
      if (IsCapturedCache)
        CacheIt->second = Ret;
      return Ret;
    }

  return false;
}
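
// For example (a sketch), isNonEscapingLocalObject returns true for
//   %a = alloca i32   ; address never stored or passed to an unknown callee
// but returns false once the address is captured, e.g. by
//   store i32* %a, i32** @g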

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
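/// For example (a sketch): for a parameter declared dereferenceable(16) and a
/// precise 4-byte query, the minimal extent is max(16, 4) = 16 bytes; the
/// dereferenceable bytes are ignored below if null is a valid location and
/// the pointer may be null.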
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull;
  uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the
    // variable. If we've been called recursively, the Offset bit width will be
    // greater than the constant's (the Offset's always as wide as the
    // outermost call), so we'll zext here and process any extension in the
    // isa<SExtInst> & isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

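  // A worked example (sketch): for V = add (shl i64 %x, 1), 24 the handling
  // below yields Scale = 2 and Offset = 24, i.e. V == 2*%x + 24, with %x
  // returned as the remaining variable part.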
  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // the semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
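  // For example (a sketch): decomposing zext i32 (%y + 4) to i64 as
  // zext(%y) + 4 is only valid when the add cannot unsigned-wrap (NUW);
  // otherwise the zext is treated as an opaque value below.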
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
static APInt adjustToPointerSize(APInt Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
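/// For example (a sketch):
///   %gep = getelementptr inbounds i32, i32* %p, i64 %i
/// decomposes into Base = %p, one variable index (%i, scaled by 4) and a
/// constant offset of 0.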
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
    DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
    DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume a nocaptured pointer is
        // not returned from some special intrinsic (because the function would
        // have to be marked with the returns attribute), it is crucial to use
        // this function because it should be in sync with CaptureTracking.
        // Not using it may cause weird miscompilations where 2 aliasing
        // pointers are assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject
      // does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }
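
      // For example (a sketch): in getelementptr {i32, i64}, {i32, i64}* %p,
      // i32 0, i32 1, field 1 typically sits at byte offset 8 (data-layout
      // dependent), and that 8 is accumulated into StructOffset above.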

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        APInt Offset = (DL.getTypeAllocSize(GTI.getIndexedType()) *
                        CIdx->getValue().sextOrSelf(MaxPointerSize))
                           .sextOrTrunc(MaxPointerSize);
        Decomposed.OtherOffset += Offset;
        Decomposed.MinOtherOffset += Offset;
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize*2) *
                               Scale.sext(MaxPointerSize*2);
      if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        APInt Offset = IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
        Decomposed.OtherOffset += Offset;
        APInt IndexBound =
            computeConstantRange(Index, true, AC, dyn_cast<Instruction>(GEPOp))
                .getLower()
                .sextOrTrunc(MaxPointerSize);
        // If we find a non-negative lower bound for the index value, we can
        // improve the known offset to include it. By just using non-negative
        // lower bounds, we conveniently skip any index values for which we do
        // not find a useful lower bound.
        if (IndexBound.isNonNegative())
          Decomposed.MinOtherOffset += Offset + IndexBound * Scale;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
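      // For example (a sketch): with 32-bit pointers and a 64-bit calculation
      // width, a scale of 0xFFFFFFFF is canonicalized to -1 by the
      // shift/ashr pair in adjustToPointerSize.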
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
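  // For example (a sketch): a call that is both readonly and argmemonly
  // intersects below to FMRB_OnlyReadsArgumentPointees.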
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
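  // For example (a sketch): the destination argument of memset_pattern16 is
  // recognized by isWriteOnlyParam, so the check below returns
  // ModRefInfo::Mod for it.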
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocA, LocB));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocB, LocA));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags, AAQI);

  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run.
  // However, a tail call may use an alloca with byval. Calling with byval
  // copies the contents of the alloca into argument registers or stack slots,
  // so there is no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
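  // For example (a sketch):
  //   %ss = call i8* @llvm.stacksave()
  //   %p  = alloca i32, i32 %n               ; dynamic alloca
  //   call void @llvm.stackrestore(i8* %ss)  ; deallocates %p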
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the
  // pointer as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
                                                MemoryLocation(Object), AAQI);
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more
      // aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
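    // For example (a sketch): for %m = call i8* @malloc(i64 8), a location
    // that is known not to alias %m is neither read nor written by the call.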
    if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc, AAQI)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc, AAQI)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
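  // For example (a sketch): the pair (llvm.assume(i1 %c), call void @f())
  // is reported as NoModRef below regardless of what @f does.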
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2) {
    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
    if (C1->getValue().sextOrSelf(BitWidth) ==
        C2->getValue().sextOrSelf(BitWidth))
      return MayAlias;
  }

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
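  // For example (a sketch): for %a = gep %S, %S* %p, i32 0, i32 0 and
  // %b = gep %S, %S* %p, i32 0, i32 1, the last-indexed type is %S and the
  // constant field indices differ, so the logic below concludes NoAlias.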
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or vector);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    Type *LastElementTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
    const uint64_t ElementSize =
        DL.getTypeStoreSize(LastElementTy).getFixedSize();
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
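        // For example (a sketch): if one index is known to be even and the
        // other known to be odd, they differ in the low bit, so the indexed
        // elements are provably distinct per the known-bits test below.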
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  if (C1->getValue().getActiveBits() > 64 ||
      C2->getValue().getActiveBits() > 64)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, then the GEP can not alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
// struct { int f0, int f1, ...} foo;
// foo alloca;
// foo* random = bar(alloca);
// int *f0 = &alloca.f0
// int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
// %alloca = alloca %struct.foo
// %random = call %struct.foo* @random(%struct.foo* %alloca)
// %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
// %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of
// %alloca, the highest %f1 can be is (%alloca + 3). This means %random can
// not be higher than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
    const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
    LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  APInt ObjectBaseOffset = DecompObject.StructOffset +
                           DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset from the
  // base and can use it. If the GEP has variable indices, we can't get the
  // exact GEP offset to identify a pointer alias, so return false in that
  // case.
  if (!DecompGEP.VarIndices.empty())
    return false;

  APInt GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  unsigned MaxPointerSize = getMaxPointerSize(DL);
  DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP1.HasCompileTimeConstantScale =
      DecompGEP2.HasCompileTimeConstantScale = true;
  DecompGEP1.MinOtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP2.MinOtherOffset = APInt(MaxPointerSize, 0);

  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if the index scale is not a
  // compile-time constant.
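  // For example (a sketch), a GEP over <vscale x 4 x i32> has a
  // runtime-dependent element stride, so the decomposition records a
  // non-constant scale and we conservatively return MayAlias.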
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return MayAlias;

  APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
  APInt GEP1BaseOffsetMin = DecompGEP1.StructOffset + DecompGEP1.MinOtherOffset;
  APInt GEP2BaseOffsetMin = DecompGEP2.StructOffset + DecompGEP2.MinOtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two GEP instructions with must-alias'ing or non-aliasing base
  // pointers, figure out if the indexes to the GEPs tell us anything about the
  // derived pointers.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
                   UnderlyingV2, LocationSize::unknown(), AAMDNodes(), AAQI);

    // Check for GEPs of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(
          UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about
        // the relation of the resulting pointer.
        // If the max search depth is reached, the result is undefined.
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GEP1BaseOffsetMin -= GEP2BaseOffsetMin;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses have unknown size, we can't do anything useful here.
    if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, LocationSize::unknown(),
                               AAMDNodes(), V2, LocationSize::unknown(),
                               V2AAInfo, AAQI, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done
      // through a pointer value associated with an address range of the
      // memory access, otherwise the behavior is undefined.".
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resulting pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset.sge(0)) {
      if (V2Size != LocationSize::unknown()) {
        if (GEP1BaseOffset.ult(V2Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |->V1Size        |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...').
      if (V1Size != LocationSize::unknown() &&
          V2Size != LocationSize::unknown()) {
        if ((-GEP1BaseOffset).ult(V1Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    }
  }
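  // For example (illustrative numbers): if GEP1BaseOffset == 4 and
  // V2Size == 8, GEP1's access starts at byte 4 inside V2's bytes 0..7, so
  // the rule above yields PartialAlias; with GEP1BaseOffset == 8 the accesses
  // are disjoint and it yields NoAlias.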
  if (!DecompGEP1.VarIndices.empty()) {
    APInt Modulo(MaxPointerSize, 0);
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known =
            computeKnownBits(V, DL, 0, &AC, dyn_cast<Instruction>(GEP1), DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        APInt Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
    if (V1Size != LocationSize::unknown() &&
        V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
        (Modulo - ModOffset).uge(V1Size.getValue()))
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffsetMin.sgt(0) &&
        V2Size != LocationSize::unknown() &&
        GEP1BaseOffsetMin.uge(V2Size.getValue()))
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}
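// For instance, MergeAliasResults(PartialAlias, MustAlias) == PartialAlias,
// while mixing NoAlias with MustAlias (contradictory per-source answers)
// falls through to MayAlias.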
/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const AAMDNodes &SIAAInfo, const Value *V2,
                           LocationSize V2Size, const AAMDNodes &V2AAInfo,
                           const Value *UnderV2, AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
                     V2Size, V2AAInfo, AAQI);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                                 SISize, SIAAInfo, AAQI, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
                                     SISize, SIAAInfo, AAQI, UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2, AAQueryInfo &AAQI) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise as
  // well as efficient check: just check for aliases between the values on
  // corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                                MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      AliasResult OrigAliasResult;
      {
        // Limited lifetime iterator invalidated by the aliasCheck call below.
        auto CacheIt = AAQI.AliasCache.find(Locs);
        assert((CacheIt != AAQI.AliasCache.end()) &&
               "There must exist an entry for the phi node");
        OrigAliasResult = CacheIt->second;
        CacheIt->second = NoAlias;
      }

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo, AAQI);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias) {
        auto Pair =
            AAQI.AliasCache.insert(std::make_pair(Locs, OrigAliasResult));
        assert(!Pair.second && "Entry must have existed");
        Pair.first->second = OrigAliasResult;
      }
      return Alias;
    }
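  // As an illustrative sketch (hypothetical IR) of the same-block check
  // above:
  //   %p = phi i8* [ %a, %bb1 ], [ %b, %bb2 ]
  //   %q = phi i8* [ %x, %bb1 ], [ %y, %bb2 ]
  // On any execution %p and %q come from the same incoming edge, so it
  // suffices to compare %a with %x and %b with %y.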
  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (EnableRecPhiAnalysis) {
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }
      }
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1))
        // If any of the sources is itself a PHI, return MayAlias
        // conservatively to avoid compile time explosion. The worst possible
        // case is if both sides are PHI nodes, in which case this is O(m x n)
        // time where 'm' and 'n' are the number of PHI sources.
        return MayAlias;

      if (EnableRecPhiAnalysis)
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return MayAlias;

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = LocationSize::unknown();
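  // An illustrative recursive-PHI shape (hypothetical IR), a pointer advanced
  // through a loop:
  //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr inbounds i8, i8* %p, i64 1
  // The GEP source is skipped above, and PNSize is widened to unknown here so
  // that the remaining sources stand in for every address the loop can reach.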
  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
                                 PNAAInfo, AAQI, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, AAQI, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      LocationSize V2Size, AAMDNodes V2AAInfo,
                                      AAQueryInfo &AAQI, const Value *O1,
                                      const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
      return NoAlias;
    if (isEscapeSource(O2) &&
        isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
      return NoAlias;
  }
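  // To illustrate the O1 != O2 rules above (hypothetical cases): two distinct
  // allocas are both identified objects and therefore NoAlias, and an alloca
  // whose address never escapes cannot be returned by a call, so a call
  // result cannot alias it.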
  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
                            MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair =
      AAQI.AliasCache.try_emplace(Locs, MayAlias);
  if (!Pair.second)
    return Pair.first->second;
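  // Note that the MayAlias entry seeded above doubles as a recursion guard:
  // if any of the recursive checks below reaches this same query again, it
  // finds the cached entry and returns MayAlias instead of looping.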
  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
    if (Result != MayAlias) {
      auto ItInsPair = AAQI.AliasCache.insert(std::make_pair(Locs, Result));
      assert(!ItInsPair.second && "Entry must have existed");
      ItInsPair.first->second = Result;
      return Result;
    }
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result =
        aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias) {
      Pair = AAQI.AliasCache.try_emplace(Locs, Result);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = Result;
    }
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias) {
      Pair = AAQI.AliasCache.try_emplace(Locs, Result);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = Result;
    }
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) {
      Pair = AAQI.AliasCache.try_emplace(Locs, PartialAlias);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = PartialAlias;
    }

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
  Pair = AAQI.AliasCache.try_emplace(Locs, Result);
  assert(!Pair.second && "Entry must have existed");
  return Pair.first->second = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
/// they can not be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
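///
/// For example (illustrative): with Dest = {%i * 4} and Src = {%i * 4} the
/// index cancels and Dest is left empty, while with Src = {%j * 4} the result
/// is Dest = {%i * 4, %j * -4}.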
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    APInt Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!!Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, APInt BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!
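  // An illustrative instance: comparing &A[zext(%x + 1)] against &A[zext(%x)],
  // the two remaining indices are zext(%x + 1) and zext(%x) with opposite
  // scales; stripping the extensions recovers the common %x with constant
  // offsets 1 and 0, i.e. the pointers are exactly one element apart.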
  // If we've been sext'ed then zext'd the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and
  // so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(), F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F),
                       AM.getCachedResult<PhiValuesAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr,
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}