//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This will allow us to more-aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C) which is
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially
/// involved in a cycle.
/// Because we are analysing 'through' phi nodes, we need to be careful with
/// value equivalence. We use reachability to make sure a value cannot be
/// involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject(); both functions need to use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object".
  // In case q is passed to isObjectSmallerThan() as the 1st parameter, before
  // llvm::getObjectSize() is called to get the size of the entire object, we
  // should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull;
  uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to
  // be accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts and V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the
    // variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
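  // Illustrative example (not exhaustive): decomposing
  //   zext i16 (zext i8 %x to i16) to i32
  // yields %x with ZExtBits accumulating 24, since repeated zexts (and
  // likewise repeated sexts) fold into a single wider extend.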
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// This function is capable of analyzing everything that getUnderlyingObject
/// can look through. To be able to do that getUnderlyingObject and
/// DecomposeGEPExpression must use the same search depth
/// (MaxLookupSearchDepth).
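///
/// For example, "getelementptr inbounds i32, i32* %p, i64 %i" would
/// (conceptually) decompose into Base = %p, Offset = 0 and a single variable
/// index with V = %i and Scale = 4, assuming i32 occupies 4 bytes in the
/// given DataLayout.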
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxPointerSize, 0);
  Decomposed.HasCompileTimeConstantScale = true;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // attributes, but that have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from such intrinsics (since the function would have
        // to be marked with the returned attribute), it is crucial to use this
        // function so that we stay in sync with CaptureTracking. Not using it
        // may cause weird miscompilations where two aliasing pointers are
        // assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == None)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return Decomposed;
    }

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return Decomposed;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
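      // For example (illustrative only), in
      //   getelementptr {i32, i64}, {i32, i64}* %p, i64 1, i32 1
      // the first index advances by a whole struct-sized step and is handled
      // by the array/pointer case below, while the second adds the constant
      // field offset of member 1 from the struct layout.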
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.Offset +=
            DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
            CIdx->getValue().sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      bool Overflow;
      APInt ScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize)
                               .smul_ov(Scale, Overflow);
      if (Overflow) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        Decomposed.Offset += ScaledOffset;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale, CxtI};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e. Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
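  // Attribute-based knowledge (writeonly/readonly/readnone on the argument)
  // is consulted first; anything we cannot classify here falls through to the
  // generic AAResultBase implementation at the end of this function.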
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr, LocB.Size,
                    LocB.AATags, AAQI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run.
  // However, a tail call may use an alloca with byval. Calling with byval
  // copies the contents of the alloca into argument registers or stack slots,
  // so there is no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(
          MemoryLocation::getBeforeOrAfter(*CI),
          MemoryLocation::getBeforeOrAfter(Object), AAQI);
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call),
                                 Loc, AAQI) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., source and destination of any given memcpy are either
  // no-alias or must-alias.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
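    // Aliasing the source means the memcpy may read Loc (Ref); aliasing the
    // destination means it may write Loc (Mod); the two bits are combined
    // independently below.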
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;
  // The same applies to deoptimize which is essentially a guard(false).
  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Return true if we know V to be the base address of the corresponding
/// memory object. This implies that any address less than V must be out of
/// bounds for the underlying object. Note that just being isIdentifiedObject()
/// is not enough - for example, a negative offset from a noalias argument or
/// call can be inbounds w.r.t. the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if the index scale is not a
  // compile-time constant.
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return MayAlias;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "getUnderlyingObject");

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  DecompGEP1.Offset -= DecompGEP2.Offset;
  GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return NoAlias;
  } else {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!V1Size.hasValue() && !V2Size.hasValue())
      return MayAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return getBestAAResults().alias(
        MemoryLocation(UnderlyingV1, V1Size, V1AAInfo),
        MemoryLocation(UnderlyingV2, V2Size, V2AAInfo), AAQI);

  // Do the base pointers alias?
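  // The base objects are compared with unknown ("before or after") extents
  // here; the offset- and size-based reasoning below is only meaningful once
  // we know the two bases must alias.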
  AliasResult BaseAlias = getBestAAResults().alias(
      MemoryLocation::getBeforeOrAfter(UnderlyingV1),
      MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);

  // If we get a No or May, then return it immediately; no amount of analysis
  // will improve this situation.
  if (BaseAlias != MustAlias) {
    assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;

    if (Off.isNegative()) {
      // Swap if we have the situation where:
      //   +                +
      //   | BaseOffset     |
      //   ---------------->|
      //   |-->V1Size       |-------> V2Size
      //  GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (VLeftSize.hasValue()) {
      const uint64_t LSize = VLeftSize.getValue();
      if (Off.ult(LSize)) {
        // Conservatively drop processing if a phi was visited and/or offset is
        // too big.
        if (VisitedPhiBBs.empty() && VRightSize.hasValue() &&
            Off.ule(INT64_MAX)) {
          // Memory referenced by right pointer is nested. Save the offset in
          // cache.
          const uint64_t RSize = VRightSize.getValue();
          if ((Off + RSize).ule(LSize))
            AAQI.setClobberOffset(LeftPtr, RightPtr, LSize, RSize,
                                  Off.getSExtValue());
        }
        return PartialAlias;
      }
      return NoAlias;
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt GCD;
    bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
    bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
      const APInt &Scale = DecompGEP1.VarIndices[i].Scale;
      if (i == 0)
        GCD = Scale.abs();
      else
        GCD = APIntOps::GreatestCommonDivisor(GCD, Scale.abs());

      if (AllNonNegative || AllNonPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;
        const Instruction *CxtI = DecompGEP1.VarIndices[i].CxtI;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, CxtI, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
                          (SignKnownOne && Scale.isNonPositive());
        AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
                          (SignKnownOne && Scale.isNonNegative());
      }
    }

    // We now have accesses at two offsets from the same base:
    //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
    //  2. 0 with size V2Size
    // Using arithmetic modulo GCD, the accesses are at
    // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
    // into the range [V2Size..GCD), then we know they cannot overlap.
    APInt ModOffset = DecompGEP1.Offset.srem(GCD);
    if (ModOffset.isNegative())
      ModOffset += GCD; // We want mod, not rem.
    if (V1Size.hasValue() && V2Size.hasValue() &&
        ModOffset.uge(V2Size.getValue()) &&
        (GCD - ModOffset).uge(V1Size.getValue()))
      return NoAlias;

    // If we know all the variables are non-negative, then the total offset is
    // also non-negative and >= DecompGEP1.Offset. We have the following layout:
    // [0, V2Size) ... [TotalOffset, TotalOffset+V1Size]
    // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
    if (AllNonNegative && V2Size.hasValue() &&
        DecompGEP1.Offset.uge(V2Size.getValue()))
      return NoAlias;
    // Similarly, if the variables are non-positive, then the total offset is
    // also non-positive and <= DecompGEP1.Offset. We have the following layout:
    // [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
    // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
    if (AllNonPositive && V1Size.hasValue() &&
        (-DecompGEP1.Offset).uge(V1Size.getValue()))
      return NoAlias;

    if (V1Size.hasValue() && V2Size.hasValue()) {
      // Try to determine whether abs(VarIndex) > 0.
      Optional<APInt> MinAbsVarIndex;
      if (DecompGEP1.VarIndices.size() == 1) {
        // VarIndex = Scale*V. If V != 0 then abs(VarIndex) >= abs(Scale).
        const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
        if (isKnownNonZero(Var.V, DL, 0, &AC, Var.CxtI, DT))
          MinAbsVarIndex = Var.Scale.abs();
      } else if (DecompGEP1.VarIndices.size() == 2) {
        // VarIndex = Scale*V0 + (-Scale)*V1.
        // If V0 != V1 then abs(VarIndex) >= abs(Scale).
        // Check that VisitedPhiBBs is empty, to avoid reasoning about
        // inequality of values across loop iterations.
        const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
        const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
        if (Var0.Scale == -Var1.Scale && Var0.ZExtBits == Var1.ZExtBits &&
            Var0.SExtBits == Var1.SExtBits && VisitedPhiBBs.empty() &&
            isKnownNonEqual(Var0.V, Var1.V, DL, &AC, /* CxtI */ nullptr, DT))
          MinAbsVarIndex = Var0.Scale.abs();
      }

      if (MinAbsVarIndex) {
        // The constant offset will have added at least +/-MinAbsVarIndex to it.
        APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
        APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
        // Check that an access at OffsetLo or lower, and an access at OffsetHi
        // or higher both do not alias.
        if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
            OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
          return NoAlias;
      }
    }

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                DecompGEP1.Offset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const AAMDNodes &SIAAInfo, const Value *V2,
                           LocationSize V2Size, const AAMDNodes &V2AAInfo,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = getBestAAResults().alias(
          MemoryLocation(SI->getTrueValue(), SISize, SIAAInfo),
          MemoryLocation(SI2->getTrueValue(), V2Size, V2AAInfo), AAQI);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias = getBestAAResults().alias(
          MemoryLocation(SI->getFalseValue(), SISize, SIAAInfo),
          MemoryLocation(SI2->getFalseValue(), V2Size, V2AAInfo), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size, V2AAInfo),
      MemoryLocation(SI->getTrueValue(), SISize, SIAAInfo), AAQI);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size, V2AAInfo),
      MemoryLocation(SI->getFalseValue(), SISize, SIAAInfo), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    AAQueryInfo &AAQI) {
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      Optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = getBestAAResults().alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize, PNAAInfo),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size,
                V2AAInfo),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
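  // A typical example is a pointer induction variable,
  //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr inbounds i8, i8* %p, i64 4
  // where getUnderlyingObject of the incoming %p.next is the phi itself.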
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes. In which case, this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiInfo then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiInfo always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    Value *OnePhi = nullptr;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1)) {
        if (OnePhi && OnePhi != PV1) {
          // To control potential compile time explosion, we choose to be
          // conservative when we have more than one Phi input. It is important
          // that we handle the single phi case as that lets us handle LCSSA
          // phi nodes and (combined with the recursive phi handling) simple
          // pointer induction variable patterns.
          return MayAlias;
        }
        OnePhi = PV1;
      }

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }

    if (OnePhi && UniqueSrc.size() > 1)
      // Out of an abundance of caution, allow only the trivial lcssa and
      // recursive phi cases.
      return MayAlias;
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations. Keep track of visited phi blocks, which will
  // be used when determining value equivalence.
  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
  auto _ = make_scope_exit([&]() {
    if (BlockInserted)
      VisitedPhiBBs.erase(PN->getParent());
  });

  // If we inserted a block into VisitedPhiBBs, alias analysis results that
  // have been cached earlier may no longer be valid. Perform recursive queries
  // with a new AAQueryInfo.
  AAQueryInfo NewAAQI = AAQI.withEmptyCache();
  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;

  AliasResult Alias = getBestAAResults().alias(
      MemoryLocation(V2, V2Size, V2AAInfo),
      MemoryLocation(V1Srcs[0], PNSize, PNAAInfo), *UseAAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
1429   if (Alias == MayAlias)
1430     return MayAlias;
1431   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1432   // remain valid for all elements, so we conservatively return MayAlias.
1433   if (isRecursive && Alias != NoAlias)
1434     return MayAlias;
1435
1436   // If all sources of the PHI node NoAlias or MustAlias V2, then return
1437   // NoAlias / MustAlias. Otherwise, return MayAlias.
1438   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1439     Value *V = V1Srcs[i];
1440
1441     AliasResult ThisAlias = getBestAAResults().alias(
1442         MemoryLocation(V2, V2Size, V2AAInfo),
1443         MemoryLocation(V, PNSize, PNAAInfo), *UseAAQI);
1444     Alias = MergeAliasResults(ThisAlias, Alias);
1445     if (Alias == MayAlias)
1446       break;
1447   }
1448
1449   return Alias;
1450 }
1451
1452 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1453 /// array references.
1454 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1455                                       const AAMDNodes &V1AAInfo,
1456                                       const Value *V2, LocationSize V2Size,
1457                                       const AAMDNodes &V2AAInfo,
1458                                       AAQueryInfo &AAQI) {
1459   // If either of the memory references is empty, it doesn't matter what the
1460   // pointer values are.
1461   if (V1Size.isZero() || V2Size.isZero())
1462     return NoAlias;
1463
1464   // Strip off any casts if they exist.
1465   V1 = V1->stripPointerCastsForAliasAnalysis();
1466   V2 = V2->stripPointerCastsForAliasAnalysis();
1467
1468   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1469   // value for undef that aliases nothing in the program.
1470   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1471     return NoAlias;
1472
1473   // Are we checking for alias of the same value?
1474   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1475   // different iterations. We must therefore make sure that this is not the
1476   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1477   // happen by looking at the visited phi nodes and making sure they cannot
1478   // reach the value.
1479   if (isValueEqualInPotentialCycles(V1, V2))
1480     return MustAlias;
1481
1482   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1483     return NoAlias; // Scalars cannot alias each other.
1484
1485   // Figure out what objects these things are pointing to if we can.
1486   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1487   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1488
1489   // Null values in the default address space don't point to any object, so they
1490   // don't alias any other pointer.
1491   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1492     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1493       return NoAlias;
1494   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1495     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1496       return NoAlias;
1497
1498   if (O1 != O2) {
1499     // If V1/V2 point to two different objects, we know that we have no alias.
1500     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1501       return NoAlias;
1502
1503     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1504     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1505         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1506       return NoAlias;
1507
1508     // Function arguments can't alias with things that are known to be
1509     // unambiguously identified at the function level.
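    //
    // For example (hypothetical C source):
    //
    //   void f(int *arg) { int local; ... }
    //
    // 'arg' must point at memory that already existed when f was called,
    // while 'local' is a fresh alloca created inside f, so the two cannot
    // alias.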
1510     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1511         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1512       return NoAlias;
1513
1514     // If one pointer is the result of a call/invoke or load and the other is a
1515     // non-escaping local object within the same function, then we know the
1516     // object couldn't escape to a point where the call could return it.
1517     //
1518     // Note that if the pointers are in different functions, there are a
1519     // variety of complications. A call with a nocapture argument may still
1520     // temporarily store the nocapture argument's value in a temporary memory
1521     // location if that memory location doesn't escape. Or it may pass a
1522     // nocapture value to other functions as long as they don't capture it.
1523     if (isEscapeSource(O1) &&
1524         isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
1525       return NoAlias;
1526     if (isEscapeSource(O2) &&
1527         isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
1528       return NoAlias;
1529   }
1530
1531   // If the size of one access is larger than the entire object on the other
1532   // side, then we know such behavior is undefined and can assume no alias.
1533   bool NullIsValidLocation = NullPointerIsDefined(&F);
1534   if ((isObjectSmallerThan(
1535           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1536           TLI, NullIsValidLocation)) ||
1537       (isObjectSmallerThan(
1538           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1539           TLI, NullIsValidLocation)))
1540     return NoAlias;
1541
1542   // If one of the accesses may be before the accessed pointer, canonicalize this
1543   // by using unknown after-pointer sizes for both accesses. This is
1544   // equivalent, because regardless of which pointer is lower, one of them
1545   // will always come after the other, as long as the underlying objects aren't
1546   // disjoint. We do this so that the rest of BasicAA does not have to deal
1547   // with accesses before the base pointer, and to improve cache utilization by
1548   // merging equivalent states.
1549   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1550     V1Size = LocationSize::afterPointer();
1551     V2Size = LocationSize::afterPointer();
1552   }
1553
1554   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1555   // for recursive queries. For this reason, this limit is chosen to be large
1556   // enough to be very rarely hit, while still being small enough to avoid
1557   // stack overflows.
1558   if (AAQI.Depth >= 512)
1559     return MayAlias;
1560
1561   // Check the cache before climbing up use-def chains. This also terminates
1562   // otherwise infinitely recursive queries.
1563   AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1564                             MemoryLocation(V2, V2Size, V2AAInfo));
1565   if (V1 > V2)
1566     std::swap(Locs.first, Locs.second);
1567   const auto &Pair = AAQI.AliasCache.try_emplace(
1568       Locs, AAQueryInfo::CacheEntry{NoAlias, 0});
1569   if (!Pair.second) {
1570     auto &Entry = Pair.first->second;
1571     if (!Entry.isDefinitive()) {
1572       // Remember that we used an assumption.
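      // (A non-definitive entry means the same query is still in progress
      // further up the recursion; its provisional NoAlias value is being used
      // as an assumption that may later be disproven, in which case any
      // results derived from it are purged below.)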
1573 ++Entry.NumAssumptionUses; 1574 ++AAQI.NumAssumptionUses; 1575 } 1576 return Entry.Result; 1577 } 1578 1579 int OrigNumAssumptionUses = AAQI.NumAssumptionUses; 1580 unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size(); 1581 AliasResult Result = aliasCheckRecursive(V1, V1Size, V1AAInfo, V2, V2Size, 1582 V2AAInfo, AAQI, O1, O2); 1583 1584 auto It = AAQI.AliasCache.find(Locs); 1585 assert(It != AAQI.AliasCache.end() && "Must be in cache"); 1586 auto &Entry = It->second; 1587 1588 // Check whether a NoAlias assumption has been used, but disproven. 1589 bool AssumptionDisproven = Entry.NumAssumptionUses > 0 && Result != NoAlias; 1590 if (AssumptionDisproven) 1591 Result = MayAlias; 1592 1593 // This is a definitive result now, when considered as a root query. 1594 AAQI.NumAssumptionUses -= Entry.NumAssumptionUses; 1595 Entry.Result = Result; 1596 Entry.NumAssumptionUses = -1; 1597 1598 // If the assumption has been disproven, remove any results that may have 1599 // been based on this assumption. Do this after the Entry updates above to 1600 // avoid iterator invalidation. 1601 if (AssumptionDisproven) 1602 while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults) 1603 AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val()); 1604 1605 // The result may still be based on assumptions higher up in the chain. 1606 // Remember it, so it can be purged from the cache later. 1607 if (OrigNumAssumptionUses != AAQI.NumAssumptionUses && Result != MayAlias) 1608 AAQI.AssumptionBasedResults.push_back(Locs); 1609 return Result; 1610 } 1611 1612 AliasResult BasicAAResult::aliasCheckRecursive( 1613 const Value *V1, LocationSize V1Size, const AAMDNodes &V1AAInfo, 1614 const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo, 1615 AAQueryInfo &AAQI, const Value *O1, const Value *O2) { 1616 if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) { 1617 AliasResult Result = 1618 aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI); 1619 if (Result != MayAlias) 1620 return Result; 1621 } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) { 1622 AliasResult Result = 1623 aliasGEP(GV2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O2, O1, AAQI); 1624 if (Result != MayAlias) 1625 return Result; 1626 } 1627 1628 if (const PHINode *PN = dyn_cast<PHINode>(V1)) { 1629 AliasResult Result = 1630 aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, AAQI); 1631 if (Result != MayAlias) 1632 return Result; 1633 } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) { 1634 AliasResult Result = 1635 aliasPHI(PN, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, AAQI); 1636 if (Result != MayAlias) 1637 return Result; 1638 } 1639 1640 if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) { 1641 AliasResult Result = 1642 aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, AAQI); 1643 if (Result != MayAlias) 1644 return Result; 1645 } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) { 1646 AliasResult Result = 1647 aliasSelect(S2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, AAQI); 1648 if (Result != MayAlias) 1649 return Result; 1650 } 1651 1652 // If both pointers are pointing into the same object and one of them 1653 // accesses the entire object, then the accesses must overlap in some way. 
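  //
  // As a sketch with hypothetical sizes: if O1 == O2 is a 16-byte alloca and
  // one access covers all 16 bytes, any other access to that alloca must
  // overlap it somewhere, though not necessarily starting at the same offset,
  // hence PartialAlias rather than MustAlias.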
1654   if (O1 == O2) {
1655     bool NullIsValidLocation = NullPointerIsDefined(&F);
1656     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1657         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1658          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1659       return PartialAlias;
1660   }
1661
1662   return MayAlias;
1663 }
1664
1665 /// Check whether two Values can be considered equivalent.
1666 ///
1667 /// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
1668 /// they cannot be part of a cycle in the value graph by looking at all
1669 /// visited phi nodes and making sure that the phis cannot reach the value. We
1670 /// have to do this because we are looking through phi nodes (that is, we say
1671 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
1672 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1673                                                   const Value *V2) {
1674   if (V != V2)
1675     return false;
1676
1677   const Instruction *Inst = dyn_cast<Instruction>(V);
1678   if (!Inst)
1679     return true;
1680
1681   if (VisitedPhiBBs.empty())
1682     return true;
1683
1684   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1685     return false;
1686
1687   // Make sure that the visited phis cannot reach the Value. This ensures that
1688   // the Values cannot come from different iterations of a potential cycle the
1689   // phi nodes could be involved in.
1690   for (auto *P : VisitedPhiBBs)
1691     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT))
1692       return false;
1693
1694   return true;
1695 }
1696
1697 /// Computes the symbolic difference between two decomposed GEPs.
1698 ///
1699 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1700 /// instructions GEP1 and GEP2 which have common base pointers.
1701 void BasicAAResult::GetIndexDifference(
1702     SmallVectorImpl<VariableGEPIndex> &Dest,
1703     const SmallVectorImpl<VariableGEPIndex> &Src) {
1704   if (Src.empty())
1705     return;
1706
1707   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1708     const Value *V = Src[i].V;
1709     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1710     APInt Scale = Src[i].Scale;
1711
1712     // Find V in Dest. This is N^2, but pointer indices almost never have more
1713     // than a few variable indices.
1714     for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
1715       if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1716           Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1717         continue;
1718
1719       // If we found it, subtract off Scale V's from the entry in Dest. If it
1720       // goes to zero, remove the entry.
1721       if (Dest[j].Scale != Scale)
1722         Dest[j].Scale -= Scale;
1723       else
1724         Dest.erase(Dest.begin() + j);
1725       Scale = 0;
1726       break;
1727     }
1728
1729     // If we didn't consume this entry, add it to the end of the Dest list.
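    // For instance (hypothetical values), if Src holds {%x, scale 4} and Dest
    // has no matching %x entry, Dest gains {%x, scale -4}; the net effect of
    // the whole loop is that Dest ends up representing Dest - Src.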
1730     if (!!Scale) {
1731       VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale, Src[i].CxtI};
1732       Dest.push_back(Entry);
1733     }
1734   }
1735 }
1736
1737 bool BasicAAResult::constantOffsetHeuristic(
1738     const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1739     LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
1740     AssumptionCache *AC, DominatorTree *DT) {
1741   if (VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1742       !MaybeV2Size.hasValue())
1743     return false;
1744
1745   const uint64_t V1Size = MaybeV1Size.getValue();
1746   const uint64_t V2Size = MaybeV2Size.getValue();
1747
1748   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1749
1750   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1751       Var0.Scale != -Var1.Scale)
1752     return false;
1753
1754   unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1755
1756   // We'll strip off the Extensions of Var0 and Var1 and do another round
1757   // of GetLinearExpression decomposition. For example, if Var0
1758   // is zext(%x + 1) we should get V0 == %x and V0Offset == 1.
1759
1760   APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
1761       V1Offset(Width, 0);
1762   bool NSW = true, NUW = true;
1763   unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
1764   const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
1765                                         V0SExtBits, DL, 0, AC, DT, NSW, NUW);
1766   NSW = true;
1767   NUW = true;
1768   const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
1769                                         V1SExtBits, DL, 0, AC, DT, NSW, NUW);
1770
1771   if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
1772       V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
1773     return false;
1774
1775   // We have a hit - Var0 and Var1 only differ by a constant offset!
1776
1777   // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1778   // Var1 is possible to calculate, but we're just interested in the absolute
1779   // minimum difference between the two. The minimum distance may occur due to
1780   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1781   // the minimum distance between %i and %i + 5 is 3.
1782   APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
1783   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1784   APInt MinDiffBytes =
1785       MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1786
1787   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1788   // arithmetic (i.e., for some values of GEP1 and V2, GEP1 < V2, and for other
1789   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1790   // V2Size can fit in the MinDiffBytes gap.
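  //
  // A worked example with hypothetical numbers: if MinDiffBytes == 12,
  // |BaseOffset| == 2, V1Size == 4 and V2Size == 8, then both 4 + 2 and 8 + 2
  // are <= 12, so the heuristic succeeds and the caller can report NoAlias.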
1791 return MinDiffBytes.uge(V1Size + BaseOffset.abs()) && 1792 MinDiffBytes.uge(V2Size + BaseOffset.abs()); 1793 } 1794 1795 //===----------------------------------------------------------------------===// 1796 // BasicAliasAnalysis Pass 1797 //===----------------------------------------------------------------------===// 1798 1799 AnalysisKey BasicAA::Key; 1800 1801 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) { 1802 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 1803 auto &AC = AM.getResult<AssumptionAnalysis>(F); 1804 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F); 1805 auto *PV = AM.getCachedResult<PhiValuesAnalysis>(F); 1806 return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT, PV); 1807 } 1808 1809 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) { 1810 initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry()); 1811 } 1812 1813 char BasicAAWrapperPass::ID = 0; 1814 1815 void BasicAAWrapperPass::anchor() {} 1816 1817 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa", 1818 "Basic Alias Analysis (stateless AA impl)", true, true) 1819 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 1820 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1821 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 1822 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass) 1823 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa", 1824 "Basic Alias Analysis (stateless AA impl)", true, true) 1825 1826 FunctionPass *llvm::createBasicAAWrapperPass() { 1827 return new BasicAAWrapperPass(); 1828 } 1829 1830 bool BasicAAWrapperPass::runOnFunction(Function &F) { 1831 auto &ACT = getAnalysis<AssumptionCacheTracker>(); 1832 auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>(); 1833 auto &DTWP = getAnalysis<DominatorTreeWrapperPass>(); 1834 auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>(); 1835 1836 Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, 1837 TLIWP.getTLI(F), ACT.getAssumptionCache(F), 1838 &DTWP.getDomTree(), 1839 PVWP ? &PVWP->getResult() : nullptr)); 1840 1841 return false; 1842 } 1843 1844 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 1845 AU.setPreservesAll(); 1846 AU.addRequiredTransitive<AssumptionCacheTracker>(); 1847 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 1848 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>(); 1849 AU.addUsedIfAvailable<PhiValuesWrapperPass>(); 1850 } 1851 1852 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) { 1853 return BasicAAResult( 1854 F.getParent()->getDataLayout(), F, 1855 P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F), 1856 P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F)); 1857 } 1858
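// A minimal usage sketch (hypothetical pass name, not part of this file):
// under the new pass manager, the result defined above is obtained through
// the FunctionAnalysisManager, e.g.
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     BasicAAResult &BAA = AM.getResult<BasicAA>(F);
//     // ... issue alias queries against BAA as needed ...
//     return PreservedAnalyses::all();
//   }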