//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This will allow us to more-aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C) which is
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to
/// decompose GEPs is reached. It will affect the precision
/// of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject(). Both functions need to use the same search depth;
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //  - either rewind the pointer q to the base address of the object in
  //    question (in this case rewind to p), or
  //  - just give up. It is up to the caller to make sure the pointer is
  //    pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull;
  uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to
  // be accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts and return V as a Value*, and
/// return whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == MaxLookupSearchDepth) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the
    // variable. If we've been called recursively, the Offset bit width will
    // be greater than the constant's (the Offset's always as wide as the
    // outermost call), so we'll zext here and process any extension in the
    // isa<SExtInst> & isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process
      // sign extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
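  //
  // For example, given
  //   %a = add nsw i32 %x, 5
  //   %e = sext i32 %a to i64
  // the nsw flag lets us distribute the sext over the add, so decomposing %e
  // yields V = %x, Scale = 1, Offset = 5 with SExtBits = 32.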
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b. E.g., with a 64-bit Offset and 32-bit
/// pointers, this maps 0x00000000FFFFFFFF to -1 by truncating to 32 bits and
/// sign-extending back.
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// This function is capable of analyzing everything that getUnderlyingObject
/// can look through. To be able to do that getUnderlyingObject and
/// DecomposeGEPExpression must use the same search depth
/// (MaxLookupSearchDepth).
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxPointerSize, 0);
  Decomposed.HasCompileTimeConstantScale = true;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with a returns attribute), it is crucial to
        // use this function because it should be in sync with
        // CaptureTracking. Not using it may cause weird miscompilations where
        // two aliasing pointers are assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return Decomposed;
    }

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return Decomposed;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
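      // For example, for getelementptr [10 x i32], [10 x i32]* %p, i64 1,
      // i64 3, the first index contributes 1 * 40 bytes (the alloc size of
      // [10 x i32]) and the second index contributes 3 * 4 bytes to the
      // constant offset.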
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.Offset +=
            DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
            CIdx->getValue().sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      bool Overflow;
      APInt ScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize)
                               .smul_ov(Scale, Overflow);
      if (Overflow) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        Decomposed.Offset += ScaledOffset;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToPointerSize(Decomposed.Offset, PointerSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e. Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
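  // Canonicalize the pair ordering so that queries for (A, B) and (B, A)
  // share a single cache entry.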
  AAQueryInfo::LocPair Locs(LocA, LocB);
  if (Locs.first.Ptr > Locs.second.Ptr)
    std::swap(Locs.first, Locs.second);
  auto CacheIt = AAQI.AliasCache.find(Locs);
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags, AAQI);

  assert(VisitedPhiBBs.empty());
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run.
  // However, a tail call may use an alloca with byval. Calling with byval
  // copies the contents of the alloca into argument registers or stack slots,
  // so there is no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the
  // pointer as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
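      // Compare the operand and the object as whole locations of unknown
      // extent, since an access through the operand could touch any part of
      // the underlying object.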
      AliasResult AR = getBestAAResults().alias(
          MemoryLocation::getBeforeOrAfter(*CI),
          MemoryLocation::getBeforeOrAfter(Object), AAQI);
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fall back to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation::getBeforeOrAfter(Call),
                                 Loc, AAQI) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., source and destination of any given memcpy are either
  // no-alias or must-alias.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;
  // The same applies to deoptimize which is essentially a guard(false).
  if (isIntrinsicCall(Call, Intrinsic::experimental_deoptimize))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (!MaybeV1Size.hasValue() || !MaybeV2Size.hasValue())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2) {
    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
    if (C1->getValue().sextOrSelf(BitWidth) ==
        C2->getValue().sextOrSelf(BitWidth))
      return MayAlias;
  }

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a
    //   sequential type (array or vector);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
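    //
    // For example, with an element type of i32 and accesses of 8 bytes, the
    // access at index 0 covers elements 0 and 1 while the access at index 1
    // covers elements 1 and 2, so differing last indices alone would not
    // prove the accesses disjoint.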
    Type *LastElementTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
    const uint64_t ElementSize =
        DL.getTypeStoreSize(LastElementTy).getFixedSize();
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
  }
  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, then the GEP can not alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object.
// But since %f0 points to the beginning of %alloca, the highest %f1 can be
// is (%alloca + 3). This means %random can not be higher than (%alloca - 1),
// and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (!MaybeObjectAccessSize.hasValue() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  // If the GEP has no variable indices, we know the precise offset from the
  // base and can use it. If the GEP has variable indices, we can't compute an
  // exact GEP offset to identify a pointer alias, so return false in that
  // case.
  if (!DecompGEP.VarIndices.empty())
    return false;

  return DecompGEP.Offset.sge(DecompObject.Offset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if the index scale is not a
  // compile-time constant.
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return MayAlias;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "getUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias = aliasCheck(
        UnderlyingV1, LocationSize::beforeOrAfterPointer(), AAMDNodes(),
        UnderlyingV2, LocationSize::beforeOrAfterPointer(), AAMDNodes(), AAQI);

    // For GEPs with identical offsets, we can preserve the size and AAInfo
    // when performing the alias check on the underlying objects.
    if (BaseAlias == MayAlias && DecompGEP1.Offset == DecompGEP2.Offset &&
        DecompGEP1.VarIndices == DecompGEP2.VarIndices) {
      AliasResult PreciseBaseAlias = aliasCheck(
          UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
      if (PreciseBaseAlias == NoAlias)
        return NoAlias;
    }

    // If we get a No or May, then return it immediately; no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    DecompGEP1.Offset -= DecompGEP2.Offset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses have unknown size, we can't do anything useful here.
    if (!V1Size.hasValue() && !V2Size.hasValue())
      return MayAlias;

    AliasResult R = aliasCheck(
        UnderlyingV1, LocationSize::beforeOrAfterPointer(), AAMDNodes(),
        V2, V2Size, V2AAInfo, AAQI, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done
      // through a pointer value associated with an address range of the
      // memory access, otherwise the behavior is undefined.".
      assert(R == NoAlias || R == MayAlias);
      return R;
    }
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.Offset != 0 && DecompGEP1.VarIndices.empty()) {
    if (DecompGEP1.Offset.sge(0)) {
      if (V2Size.hasValue()) {
        if (DecompGEP1.Offset.ult(V2Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      if (V1Size.hasValue()) {
        if ((-DecompGEP1.Offset).ult(V1Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt GCD;
    bool AllNonNegative = DecompGEP1.Offset.isNonNegative();
    bool AllNonPositive = DecompGEP1.Offset.isNonPositive();
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
      const APInt &Scale = DecompGEP1.VarIndices[i].Scale;
      if (i == 0)
        GCD = Scale.abs();
      else
        GCD = APIntOps::GreatestCommonDivisor(GCD, Scale.abs());

      if (AllNonNegative || AllNonPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known =
            computeKnownBits(V, DL, 0, &AC, dyn_cast<Instruction>(GEP1), DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        AllNonNegative &= (SignKnownZero && Scale.isNonNegative()) ||
                          (SignKnownOne && Scale.isNonPositive());
        AllNonPositive &= (SignKnownZero && Scale.isNonPositive()) ||
                          (SignKnownOne && Scale.isNonNegative());
      }
    }

    // We now have accesses at two offsets from the same base:
    //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
    //  2. 0 with size V2Size
    // Using arithmetic modulo GCD, the accesses are at
    // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
    // into the range [V2Size..GCD), then we know they cannot overlap.
    APInt ModOffset = DecompGEP1.Offset.srem(GCD);
    if (ModOffset.isNegative())
      ModOffset += GCD; // We want mod, not rem.
    if (V1Size.hasValue() && V2Size.hasValue() &&
        ModOffset.uge(V2Size.getValue()) &&
        (GCD - ModOffset).uge(V1Size.getValue()))
      return NoAlias;

    // If we know all the variables are non-negative, then the total offset is
    // also non-negative and >= DecompGEP1.Offset. We have the following
    // layout:
    //   [0, V2Size) ... [TotalOffset, TotalOffset+V1Size]
    // If DecompGEP1.Offset >= V2Size, the accesses don't alias.
    if (AllNonNegative && V2Size.hasValue() &&
        DecompGEP1.Offset.uge(V2Size.getValue()))
      return NoAlias;
    // Similarly, if the variables are non-positive, then the total offset is
    // also non-positive and <= DecompGEP1.Offset.
    // Similarly, if the variables are non-positive, then the total offset is
    // also non-positive and <= DecompGEP1.Offset. We have the following
    // layout:
    //   [TotalOffset, TotalOffset+V1Size) ... [0, V2Size)
    // If -DecompGEP1.Offset >= V1Size, the accesses don't alias.
    if (AllNonPositive && V1Size.hasValue() &&
        (-DecompGEP1.Offset).uge(V1Size.getValue()))
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                DecompGEP1.Offset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provide a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const AAMDNodes &SIAAInfo, const Value *V2,
                           LocationSize V2Size, const AAMDNodes &V2AAInfo,
                           const Value *UnderV2, AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
                     V2Size, V2AAInfo, AAQI);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                                 SISize, SIAAInfo, AAQI, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
                                     SISize, SIAAInfo, AAQI, UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}
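// As an illustrative (made-up) IR example of the shared-condition case
// handled above:
//   %s1 = select i1 %c, i32* %p, i32* %q
//   %s2 = select i1 %c, i32* %x, i32* %y
// The arms pair up, so noalias(%p, %x) and noalias(%q, %y) together imply
// noalias(%s1, %s2); a MayAlias on either pair makes the overall result
// MayAlias.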
/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2, AAQueryInfo &AAQI) {
  // If the values are PHIs in the same block, we can do a more precise as
  // well as efficient check: just check for aliases between the values on
  // corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                                MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      AliasResult OrigAliasResult;
      {
        // Limited lifetime iterator invalidated by the aliasCheck call below.
        auto CacheIt = AAQI.AliasCache.find(Locs);
        assert((CacheIt != AAQI.AliasCache.end()) &&
               "There must exist an entry for the phi node");
        OrigAliasResult = CacheIt->second;
        CacheIt->second = NoAlias;
      }

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo, AAQI);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AAQI.updateResult(Locs, OrigAliasResult);
      return Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };

  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time,
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1))
        // If any source itself is a PHI, return MayAlias conservatively to
        // avoid compile time explosion. The worst possible case is if both
        // sides are PHI nodes, in which case this is O(m x n) time, where 'm'
        // and 'n' are the number of PHI sources.
        return MayAlias;

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return MayAlias;
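  // As a made-up example of what the code below consumes: for
  //   %p = phi i32* [ %a, %bb1 ], [ %b, %bb2 ]
  // V1Srcs is {%a, %b}, and %p can only NoAlias V2 if both %a and %b do.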
  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations. Keep track of visited phi blocks, which will
  // be used when determining value equivalence.
  bool BlockInserted = VisitedPhiBBs.insert(PN->getParent()).second;
  auto _ = make_scope_exit([&]() {
    if (BlockInserted)
      VisitedPhiBBs.erase(PN->getParent());
  });

  // If we inserted a block into VisitedPhiBBs, alias analysis results that
  // have been cached earlier may no longer be valid. Perform recursive queries
  // with a new AAQueryInfo.
  AAQueryInfo NewAAQI;
  AAQueryInfo *UseAAQI = BlockInserted ? &NewAAQI : &AAQI;

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
                                 PNAAInfo, *UseAAQI, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we have to conservatively return
  // MayAlias.
  if (isRecursive && Alias != NoAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, V, PNSize,
                                       PNAAInfo, *UseAAQI, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

/// Provide a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const AAMDNodes &V1AAInfo,
                                      const Value *V2, LocationSize V2Size,
                                      const AAMDNodes &V2AAInfo,
                                      AAQueryInfo &AAQI, const Value *O1,
                                      const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other
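  // A made-up illustration of why plain pointer equality is not enough here:
  // in
  //   %p = phi i32* [ %start, %entry ], [ %next, %loop ]
  //   %next = getelementptr inbounds i32, i32* %p, i64 1
  // comparing %next against %next while looking through the phi may pit
  // pointers from two different loop iterations against each other, which is
  // exactly what isValueEqualInPotentialCycles guards against.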
  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject
    // objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
      return NoAlias;
    if (isEscapeSource(O2) &&
        isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return NoAlias;

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects
  // aren't disjoint. We do this so that the rest of BasicAA does not have to
  // deal with accesses before the base pointer, and to improve cache
  // utilization by merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }
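  // A made-up instance of the extent check above: if O2 is
  //   %small = alloca i32
  // (a 4-byte object) and the access through V1 is known to span 8 bytes,
  // that access cannot lie entirely within %small, so any overlap would be
  // UB and we may answer NoAlias.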
  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
                            MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair =
      AAQI.AliasCache.try_emplace(Locs, MayAlias);
  if (!Pair.second)
    return Pair.first->second;

  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result =
        aliasGEP(GV2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O2, O1, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result =
        aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result =
        aliasPHI(PN, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O1, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result =
        aliasSelect(S2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O1, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AAQI.updateResult(Locs, PartialAlias);

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
  if (Result != MayAlias)
    return AAQI.updateResult(Locs, Result);

  // MayAlias is already in the cache.
  return MayAlias;
}
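// An informal note on the caching protocol implemented above: aliasCheck
// seeds the cache with a tentative MayAlias (the try_emplace) before any
// recursion, so a cyclic query (e.g. a phi whose operands lead back to the
// same (V1, V2) pair) terminates by reading the tentative entry instead of
// recursing forever; updateResult later upgrades the entry once a more
// precise result is proven.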
/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2, this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value.
/// We have to do this because we are looking through phi nodes (that is, we
/// say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two decomposed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    APInt Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!!Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round of
  // GetLinearExpression decomposition. For example, if Var0 is zext(%x + 1),
  // we should get V0 == %x and V0Offset == 1.
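  //
  // Sketch of the situation this heuristic targets (made-up values): two
  // GEPs indexed by zext(%x) and zext(%x + 1) with Scale 4 leave VarIndices
  // holding scales +4 and -4. Both indices strip to %x with constant offsets
  // 0 and 1, so the pointers are exactly 4 bytes apart, and accesses with
  // V1Size + |BaseOffset| <= 4 and V2Size + |BaseOffset| <= 4 cannot overlap.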
  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to
  // wrapping arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and
  // for other values GEP1 > V2). We'll therefore only declare NoAlias if both
  // V1Size and V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(), F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F),
                       AM.getCachedResult<PhiValuesAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}
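// For reference, a hypothetical legacy pass manager driver (not part of this
// file) would schedule the wrapper roughly as:
//   legacy::PassManager PM;
//   PM.add(createBasicAAWrapperPass());
//   PM.run(M);
// after which dependent passes obtain the result via
// getAnalysis<BasicAAWrapperPass>().getResult().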
bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr,
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}
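// For reference, a hypothetical new pass manager client (not part of this
// file) would obtain the analysis roughly as:
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] { return BasicAA(); });
//   BasicAAResult &BAR = FAM.getResult<BasicAA>(F);
// modulo the usual PassBuilder registration of the analyses BasicAA::run
// pulls in (TargetLibraryAnalysis, AssumptionAnalysis, DominatorTreeAnalysis).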