//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This allows us to more aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C), which are
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basic-aa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basic-aa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject(). Both functions need to use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" is slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //  - either rewind the pointer q to the base-address of the object in
  //    question (in this case rewind to p), or
  //  - just give up. It is up to the caller to make sure the pointer is
  //    pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull;
  uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to be
  // accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts and return V as a Value*, and
/// return whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
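  // A minimal illustration (hypothetical IR, not part of the analysis):
  //   %a = sext i16 %x to i32
  //   %b = sext i32 %a to i64
  // is equivalent to a single sign extension of %x by 48 bits; the code below
  // models this by summing the extended widths (SExtBits += 16, then += 32).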
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
static APInt adjustToPointerSize(const APInt &Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// This function is capable of analyzing everything that getUnderlyingObject
/// can look through. To be able to do that, getUnderlyingObject and
/// DecomposeGEPExpression must use the same search depth
/// (MaxLookupSearchDepth).
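///
/// Illustrative sketch (hypothetical IR): for
///   %p = getelementptr inbounds [10 x i32], [10 x i32]* %a, i64 0, i64 %i
/// the expected decomposition is Base = %a, a constant offset of 0, and one
/// variable index {V = %i, Scale = 4}, since each i32 element is 4 bytes.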
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group that can't be expressed with
        // attributes, but that have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with the returns attribute), it is crucial
        // to use this function, which stays in sync with CaptureTracking. Not
        // using it may cause weird miscompilations where two aliasing pointers
        // are assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs if the index scale is not a compile-time
    // constant.
    if (isa<ScalableVectorType>(GEPOp->getSourceElementType())) {
      Decomposed.Base = V;
      Decomposed.HasCompileTimeConstantScale = false;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
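      // E.g. (hypothetical): a constant index of 3 over an i64 element type
      // contributes 3 * 8 = 24 bytes to OtherOffset below.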
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            (DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize() *
             CIdx->getValue().sextOrSelf(MaxPointerSize))
                .sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize,
                  DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize * 2) *
                               Scale.sext(MaxPointerSize * 2);
      if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        Decomposed.OtherOffset +=
            IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
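  // (For example, a call carrying a "deopt" operand bundle may read state
  // that the callee's attributes alone do not account for, so merging the
  // callee's behavior here would be unsound in that case.)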
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_OnlyWritesMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
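  // E.g., for a call to memset_pattern16, isWriteOnlyParam treats ArgIdx 0
  // (the destination) as write-only, so it is reported as ModRefInfo::Mod.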
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocA, LocB));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocB, LocA));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags, AAQI);

  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
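  // A sketch (hypothetical IR):
  //   %ss = call i8* @llvm.stacksave()
  //   %p  = alloca i32, i32 %n            ; dynamic alloca
  //   call void @llvm.stackrestore(i8* %ss)
  // The stackrestore may deallocate the memory of %p, hence Mod below.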
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
                                                MemoryLocation(Object), AAQI);
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add it below if at least one
    // operand aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
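    // E.g. (hypothetical IR), %m = call i8* @malloc(i64 8) neither reads nor
    // writes any location that is NoAlias with the new allocation.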
    if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics either exactly overlap or do not
  // overlap, i.e., the source and destination of any given memcpy are either
  // no-alias or must-alias.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA =
        getBestAAResults().alias(MemoryLocation::getForSource(Inst), Loc, AAQI);
    AliasResult DestAA =
        getBestAAResults().alias(MemoryLocation::getForDest(Inst), Loc, AAQI);
    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained, but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained, but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained, but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
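  //
  // For example, a guard paired with a call that only reads memory yields
  // NoModRef below, while a guard paired with a writing call yields Ref,
  // since the heap state read at the guard may be modified by that call.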

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2) {
    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
    if (C1->getValue().sextOrSelf(BitWidth) ==
        C2->getValue().sextOrSelf(BitWidth))
      return MayAlias;
  }

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
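  // E.g. (hypothetical IR), in
  //   getelementptr [4 x [4 x i32]], [4 x [4 x i32]]* %p, i64 0, i64 %i, i64 1
  // the type indexed through by %i is [4 x i32], an array, so a differing %i
  // cannot change the type of the finally indexed element.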
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or vector);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    Type *LastElementTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
    const uint64_t ElementSize =
        DL.getTypeStoreSize(LastElementTy).getFixedSize();
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
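        // E.g., if one index is known even (low bit zero) and the other known
        // odd (low bit one), the known bits conflict, the indices must differ,
        // and the accessed elements cannot alias.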
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  if (C1->getValue().getActiveBits() > 64 ||
      C2->getValue().getActiveBits() > 64)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of
// %alloca, the highest %f1 can be is (%alloca + 3). This means %random cannot
// be higher than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a GlobalVariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  APInt ObjectBaseOffset = DecompObject.StructOffset +
                           DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset from the
  // base and can use it. If the GEP has variable indices, we can't get the
  // exact GEP offset to identify a pointer alias, so return false in that
  // case.
  if (!DecompGEP.VarIndices.empty())
    return false;

  APInt GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
    const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  unsigned MaxPointerSize = getMaxPointerSize(DL);
  DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP1.HasCompileTimeConstantScale =
      DecompGEP2.HasCompileTimeConstantScale = true;

  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  // Don't attempt to analyze the decomposed GEP if the index scale is not a
  // compile-time constant.
  if (!DecompGEP1.HasCompileTimeConstantScale ||
      !DecompGEP2.HasCompileTimeConstantScale)
    return MayAlias;

  APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "getUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias'ing or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the
    // other direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
                   UnderlyingV2, LocationSize::unknown(), AAMDNodes(), AAQI);

    // For GEPs with identical sizes and offsets, we can preserve the size
    // and AAInfo when performing the alias check on the underlying objects.
    if (BaseAlias == MayAlias && V1Size == V2Size &&
        GEP1BaseOffset == GEP2BaseOffset &&
        DecompGEP1.VarIndices == DecompGEP2.VarIndices &&
        !GEP1MaxLookupReached && !GEP2MaxLookupReached) {
      AliasResult PreciseBaseAlias = aliasCheck(
          UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
      if (PreciseBaseAlias == NoAlias)
        return NoAlias;
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses have unknown size, we can't do anything useful here.
    if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, LocationSize::unknown(),
                               AAMDNodes(), V2, LocationSize::unknown(),
                               V2AAInfo, AAQI, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias the GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias the GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done through
      // a pointer value associated with an address range of the memory access,
      // otherwise the behavior is undefined.".
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached, the result is undefined.
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset.sge(0)) {
      if (V2Size != LocationSize::unknown()) {
        if (GEP1BaseOffset.ult(V2Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with a negative index ('gep <ptr>, -1, ...').
      if (V1Size != LocationSize::unknown() &&
          V2Size != LocationSize::unknown()) {
        if ((-GEP1BaseOffset).ult(V1Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt Modulo(MaxPointerSize, 0);
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known =
            computeKnownBits(V, DL, 0, &AC, dyn_cast<Instruction>(GEP1), DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
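        // E.g., an index zext'd from a narrower type has a known-zero sign
        // bit, so with a non-negative Scale the term Scale*V is non-negative.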

  if (!DecompGEP1.VarIndices.empty()) {
    APInt Modulo(MaxPointerSize, 0);
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known =
            computeKnownBits(V, DL, 0, &AC, dyn_cast<Instruction>(GEP1), DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's positive,
        // regardless of whether the value is signed or unsigned.
        APInt Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
      }
    }

    // Keep only the least significant bit that is set in any of the scales.
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
    if (V1Size != LocationSize::unknown() &&
        V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
        (Modulo - ModOffset).uge(V1Size.getValue()))
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset.sgt(0) &&
        V2Size != LocationSize::unknown() &&
        GEP1BaseOffset.uge(V2Size.getValue()))
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const AAMDNodes &SIAAInfo, const Value *V2,
                           LocationSize V2Size, const AAMDNodes &V2AAInfo,
                           const Value *UnderV2, AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise as well as efficient check: just check for aliases between the
  // values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
                     V2Size, V2AAInfo, AAQI);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                                 SISize, SIAAInfo, AAQI, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
                                     SISize, SIAAInfo, AAQI, UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}
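
// Illustrative IR sketch for the same-condition case handled above (the
// names are hypothetical, not from this file):
//   %p = select i1 %c, i8* %a, i8* %b
//   %q = select i1 %c, i8* %x, i8* %y
// Since %p and %q take the same arm of the select together, alias(%p, %q)
// refines to MergeAliasResults(alias(%a, %x), alias(%b, %y)).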

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2, AAQueryInfo &AAQI) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                                MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      AliasResult OrigAliasResult;
      {
        // Limited lifetime iterator, invalidated by the aliasCheck call below.
        auto CacheIt = AAQI.AliasCache.find(Locs);
        assert((CacheIt != AAQI.AliasCache.end()) &&
               "There must exist an entry for the phi node");
        OrigAliasResult = CacheIt->second;
        CacheIt->second = NoAlias;
      }

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo, AAQI);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AAQI.updateResult(Locs, OrigAliasResult);
      return Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // For a recursive phi that recurses through a constant gep, we can perform
  // aliasing calculations using the other phi operands with an unknown size to
  // specify that an unknown number of elements after the initial value are
  // potentially accessed.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (GEPOperator *PVGEP = dyn_cast<GEPOperator>(PV)) {
      // Check whether the incoming value is a GEP that advances the pointer
      // result of this PHI node (e.g. in a loop). If this is the case, we
      // would recurse and always get a MayAlias. Handle this case specially
      // below. We need to ensure that the gep is inbounds and has a constant
      // positive operand so that we can check for alias with the initial value
      // and an unknown but positive size.
      if (PVGEP->getPointerOperand() == PN && PVGEP->isInBounds() &&
          PVGEP->getNumIndices() == 1 && isa<ConstantInt>(PVGEP->idx_begin()) &&
          !cast<ConstantInt>(PVGEP->idx_begin())->isNegative()) {
        isRecursive = true;
        return true;
      }
    }
    return false;
  };
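
  // Illustrative IR sketch of the pattern CheckForRecPhi accepts (names are
  // hypothetical):
  //   loop:
  //     %p      = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //     %p.next = getelementptr inbounds i8, i8* %p, i64 1
  // %p.next only strides forward from %base, so instead of recursing we
  // compare the remaining incoming values against V2 with an unknown size.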

  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is if both sides are PHI nodes, in which case this is O(m x n) time
    // where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (CheckForRecPhi(PV1))
        continue;
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1))
        // If any of the sources is itself a PHI, return MayAlias conservatively
        // to avoid compile time explosion. The worst possible case is if both
        // sides are PHI nodes, in which case this is O(m x n) time where 'm'
        // and 'n' are the number of PHI sources.
        return MayAlias;

      if (CheckForRecPhi(PV1))
        continue;

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }
  }

  // If V1Srcs is empty then the phi has no underlying non-phi value. This
  // should only be possible in blocks unreachable from the entry block, but
  // return MayAlias just in case.
  if (V1Srcs.empty())
    return MayAlias;

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = LocationSize::unknown();

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
                                 PNAAInfo, AAQI, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we conservatively return MayAlias.
  if (isRecursive && Alias != NoAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, AAQI, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}
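
// For instance (illustrative): alias(phi(%a, %b), %v) above folds the
// per-source results through MergeAliasResults, so NoAlias for every source
// yields NoAlias, a NoAlias/MustAlias mix degrades to MayAlias, and any
// MayAlias source short-circuits the loop.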

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const AAMDNodes &V1AAInfo,
                                      const Value *V2, LocationSize V2Size,
                                      const AAMDNodes &V2AAInfo,
                                      AAQueryInfo &AAQI, const Value *O1,
                                      const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in an address space where null is not a valid location don't
  // point to any object, so they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-constant isIdentifiedObject
    // objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
      return NoAlias;
    if (isEscapeSource(O2) &&
        isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
      return NoAlias;
  }
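
  // Illustrative C sketch of the escape-source rule above (g and local are
  // hypothetical names):
  //   void f(void) {
  //     int local;     // never escapes f
  //     int *p = g();  // call result: an escape source
  //     ...
  //   }
  // Since 'local' is a non-escaping local object, no pointer returned by the
  // call to g() can point at it, so p and &local are NoAlias.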

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
                            MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair =
      AAQI.AliasCache.try_emplace(Locs, MayAlias);
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result =
        aliasGEP(GV2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O2, O1, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result =
        aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result =
        aliasPHI(PN, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O1, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result =
        aliasSelect(S2, V2Size, V2AAInfo, V1, V1Size, V1AAInfo, O1, AAQI);
    if (Result != MayAlias)
      return AAQI.updateResult(Locs, Result);
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AAQI.updateResult(Locs, PartialAlias);

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
  return AAQI.updateResult(Locs, Result);
}
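
// Example of the cross-iteration hazard handled below (illustrative IR,
// hypothetical names): in
//   loop:
//     %p      = phi i8* [ %a, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr i8, i8* %p, i64 1
// the SSA value %p.next can denote a different address on every iteration,
// so once we have looked through the phi, pointer identity alone must not be
// taken to imply MustAlias.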

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value.
/// We have to do this because we are looking through phi nodes: that is, we
/// say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    APInt Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale*V from the entry in Dest. If the
      // scale goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list
    // with a negated scale.
    if (!!Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, const APInt &BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();
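
  // Motivating case (illustrative): accesses such as A[i] and A[i + 1],
  // where the "+ 1" is hidden inside a zext/sext, decompose into two
  // variable indices with equal and opposite scales, e.g.
  //   Var0 = { V: zext(%i + 1), Scale:  S }
  //   Var1 = { V: zext(%i),     Scale: -S }
  // Proving the two underlying values differ only by a constant lets us
  // bound how far apart the resulting pointers can be.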

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%i + 1), we should get V0 == %i and V0Offset == 1.
  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F),
                       AM.getCachedResult<PhiValuesAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
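
  // Note (illustrative): LoopInfo and PhiValues are requested with
  // getAnalysisIfAvailable rather than getAnalysis, so this pass runs with or
  // without them; the BasicAAResult constructed below accepts nullptr for
  // either and simply loses the extra precision they would provide.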

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr,
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}