1 //===- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation -==// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements the generic AliasAnalysis interface which is used as the 11 // common interface used by all clients and implementations of alias analysis. 12 // 13 // This file also implements the default version of the AliasAnalysis interface 14 // that is to be used when no other implementation is specified. This does some 15 // simple tests that detect obvious cases: two different global pointers cannot 16 // alias, a global cannot alias a malloc, two different mallocs cannot alias, 17 // etc. 18 // 19 // This alias analysis implementation really isn't very good for anything, but 20 // it is very fast, and makes a nice clean default implementation. Because it 21 // handles lots of little corner cases, other, more complex, alias analysis 22 // implementations may choose to rely on this pass to resolve these simple and 23 // easy cases. 24 // 25 //===----------------------------------------------------------------------===// 26 27 #include "llvm/Analysis/AliasAnalysis.h" 28 #include "llvm/Analysis/CFG.h" 29 #include "llvm/Analysis/CaptureTracking.h" 30 #include "llvm/Analysis/TargetLibraryInfo.h" 31 #include "llvm/Analysis/ValueTracking.h" 32 #include "llvm/IR/BasicBlock.h" 33 #include "llvm/IR/DataLayout.h" 34 #include "llvm/IR/Dominators.h" 35 #include "llvm/IR/Function.h" 36 #include "llvm/IR/Instructions.h" 37 #include "llvm/IR/IntrinsicInst.h" 38 #include "llvm/IR/LLVMContext.h" 39 #include "llvm/IR/Type.h" 40 #include "llvm/Pass.h" 41 using namespace llvm; 42 43 // Register the AliasAnalysis interface, providing a nice name to refer to. 
INITIALIZE_ANALYSIS_GROUP(AliasAnalysis, "Alias Analysis", NoAA)
char AliasAnalysis::ID = 0;

//===----------------------------------------------------------------------===//
// Default chaining methods
//===----------------------------------------------------------------------===//
// Each of these forwards the query to the next alias analysis in the chain
// (the AA member, set up by InitializeAliasAnalysis below), optionally
// refining the answer with whatever this layer can prove on its own.

/// Forward an alias query to the next analysis in the chain.
AliasResult AliasAnalysis::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
  return AA->alias(LocA, LocB);
}

/// Forward a constant-memory query to the next analysis in the chain.
bool AliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
  return AA->pointsToConstantMemory(Loc, OrLocal);
}

/// Forward a per-argument mod/ref query to the next analysis in the chain.
ModRefInfo AliasAnalysis::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
  return AA->getArgModRefInfo(CS, ArgIdx);
}

/// Return how the call site \p Call may interact with the instruction \p I,
/// which may itself be a call.
ModRefInfo AliasAnalysis::getModRefInfo(Instruction *I,
                                        ImmutableCallSite Call) {
  // We may have two calls
  if (auto CS = ImmutableCallSite(I)) {
    // Check if the two calls modify the same memory
    return getModRefInfo(Call, CS);
  } else {
    // Otherwise, check if the call modifies or references the
    // location this memory access defines. The best we can say
    // is that if the call references what this instruction
    // defines, it must be clobbered by this location.
    const MemoryLocation DefLoc = MemoryLocation::get(I);
    if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
      return MRI_ModRef;
  }
  return MRI_NoModRef;
}

/// Return how the call site \p CS may access the memory location \p Loc,
/// refining the answer using the call's memory behavior before delegating
/// down the chain.
ModRefInfo AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");

  auto MRB = getModRefBehavior(CS);
  // A call that accesses no memory cannot mod or ref anything.
  if (MRB == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  // Start from the conservative answer and narrow it as behavior allows.
  ModRefInfo Mask = MRI_ModRef;
  if (onlyReadsMemory(MRB))
    Mask = MRI_Ref;

  // If the call only touches memory through its pointer arguments, the
  // answer is the union of what it may do to each argument that may alias
  // Loc; if no argument aliases Loc at all, the call cannot touch it.
  if (onlyAccessesArgPointees(MRB)) {
    bool doesAlias = false;
    ModRefInfo AllArgsMask = MRI_NoModRef;
    if (doesAccessArgPointees(MRB)) {
      for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
           AI != AE; ++AI) {
        const Value *Arg = *AI;
        // Non-pointer arguments cannot name memory.
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
        MemoryLocation ArgLoc =
            MemoryLocation::getForArgument(CS, ArgIdx, *TLI);
        if (!isNoAlias(ArgLoc, Loc)) {
          ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
          doesAlias = true;
          AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
        }
      }
    }
    if (!doesAlias)
      return MRI_NoModRef;
    Mask = ModRefInfo(Mask & AllArgsMask);
  }

  // If Loc is a constant memory location, the call definitely could not
  // modify the memory location.
  if ((Mask & MRI_Mod) && pointsToConstantMemory(Loc))
    Mask = ModRefInfo(Mask & ~MRI_Mod);

  // If this is the end of the chain, don't forward.
  // NOTE(review): AA is asserted non-null on entry, so this check looks
  // unreachable here — confirm whether a chain tail can clear AA.
  if (!AA) return Mask;

  // Otherwise, fall back to the next AA in the chain. But we can merge
  // in any mask we've managed to compute.
  return ModRefInfo(AA->getModRefInfo(CS, Loc) & Mask);
}

/// Return how the two call sites may interact with each other's memory:
/// the result describes what \p CS1 may do to memory that \p CS2 accesses.
ModRefInfo AliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");

  // If CS1 or CS2 are readnone, they don't interact.
  auto CS1B = getModRefBehavior(CS1);
  if (CS1B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  auto CS2B = getModRefBehavior(CS2);
  if (CS2B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  // If they both only read from memory, there is no dependence.
  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
    return MRI_NoModRef;

  ModRefInfo Mask = MRI_ModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (onlyReadsMemory(CS1B))
    Mask = ModRefInfo(Mask & MRI_Ref);

  // If CS2 only access memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (onlyAccessesArgPointees(CS2B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS2B)) {
      for (ImmutableCallSite::arg_iterator
           I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        // Non-pointer arguments cannot name memory.
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
        auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, *TLI);

        // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence of
        // CS1 on that location is the inverse.
        ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
        if (ArgMask == MRI_Mod)
          ArgMask = MRI_ModRef;
        else if (ArgMask == MRI_Ref)
          ArgMask = MRI_Mod;

        R = ModRefInfo((R | (getModRefInfo(CS1, CS2ArgLoc) & ArgMask)) & Mask);
        // Once we've accumulated everything Mask allows, stop early.
        if (R == Mask)
          break;
      }
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
  if (onlyAccessesArgPointees(CS1B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS1B)) {
      for (ImmutableCallSite::arg_iterator
           I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        // Non-pointer arguments cannot name memory.
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
        auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, *TLI);

        // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
        // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
        // might Ref, then we care only about a Mod by CS2.
        ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
        ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc);
        if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
             (ArgR & MRI_ModRef) != MRI_NoModRef) ||
            ((ArgMask & MRI_Ref) != MRI_NoModRef &&
             (ArgR & MRI_Mod) != MRI_NoModRef))
          R = ModRefInfo((R | ArgMask) & Mask);

        // Once we've accumulated everything Mask allows, stop early.
        if (R == Mask)
          break;
      }
    }
    return R;
  }

  // If this is the end of the chain, don't forward.
  // NOTE(review): AA is asserted non-null on entry, so this check looks
  // unreachable here — confirm whether a chain tail can clear AA.
  if (!AA) return Mask;

  // Otherwise, fall back to the next AA in the chain. But we can merge
  // in any mask we've managed to compute.
  return ModRefInfo(AA->getModRefInfo(CS1, CS2) & Mask);
}

/// Return the memory behavior of the call site \p CS, refining with the
/// called function's behavior when the callee is known.
FunctionModRefBehavior AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");

  auto Min = FMRB_UnknownModRefBehavior;

  // Call back into the alias analysis with the other form of getModRefBehavior
  // to see if it can give a better response.
  if (const Function *F = CS.getCalledFunction())
    Min = getModRefBehavior(F);

  // If this is the end of the chain, don't forward.
  // NOTE(review): AA is asserted non-null on entry, so this check looks
  // unreachable here — confirm whether a chain tail can clear AA.
  if (!AA) return Min;

  // Otherwise, fall back to the next AA in the chain. But we can merge
  // in any result we've managed to compute.
  return FunctionModRefBehavior(AA->getModRefBehavior(CS) & Min);
}

/// Forward a function memory-behavior query to the next analysis in the chain.
FunctionModRefBehavior AliasAnalysis::getModRefBehavior(const Function *F) {
  assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
  return AA->getModRefBehavior(F);
}

//===----------------------------------------------------------------------===//
// AliasAnalysis non-virtual helper method implementation
//===----------------------------------------------------------------------===//
// These translate mod/ref queries against specific instruction kinds into
// alias queries on the instruction's memory operand.

/// Return how the load \p L may access the location \p Loc.
ModRefInfo AliasAnalysis::getModRefInfo(const LoadInst *L,
                                        const MemoryLocation &Loc) {
  // Be conservative in the face of volatile/atomic.
  if (!L->isUnordered())
    return MRI_ModRef;

  // If the load address doesn't alias the given address, it doesn't read
  // or write the specified memory.
  if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
    return MRI_NoModRef;

  // Otherwise, a load just reads.
  return MRI_Ref;
}

/// Return how the store \p S may access the location \p Loc.
ModRefInfo AliasAnalysis::getModRefInfo(const StoreInst *S,
                                        const MemoryLocation &Loc) {
  // Be conservative in the face of volatile/atomic.
  if (!S->isUnordered())
    return MRI_ModRef;

  if (Loc.Ptr) {
    // If the store address cannot alias the pointer in question, then the
    // specified memory cannot be modified by the store.
    if (!alias(MemoryLocation::get(S), Loc))
      return MRI_NoModRef;

    // If the pointer is a pointer to constant memory, then it could not have
    // been modified by this store.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a store just writes.
  return MRI_Mod;
}

/// Return how the va_arg \p V may access the location \p Loc.
ModRefInfo AliasAnalysis::getModRefInfo(const VAArgInst *V,
                                        const MemoryLocation &Loc) {

  if (Loc.Ptr) {
    // If the va_arg address cannot alias the pointer in question, then the
    // specified memory cannot be accessed by the va_arg.
    if (!alias(MemoryLocation::get(V), Loc))
      return MRI_NoModRef;

    // If the pointer is a pointer to constant memory, then it could not have
    // been modified by this va_arg.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a va_arg reads and writes.
  return MRI_ModRef;
}

/// Return how the cmpxchg \p CX may access the location \p Loc.
ModRefInfo AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX,
                                        const MemoryLocation &Loc) {
  // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
  if (CX->getSuccessOrdering() > Monotonic)
    return MRI_ModRef;

  // If the cmpxchg address does not alias the location, it does not access it.
  if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
    return MRI_NoModRef;

  return MRI_ModRef;
}

/// Return how the atomicrmw \p RMW may access the location \p Loc.
ModRefInfo AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW,
                                        const MemoryLocation &Loc) {
  // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
  if (RMW->getOrdering() > Monotonic)
    return MRI_ModRef;

  // If the atomicrmw address does not alias the location, it does not access it.
  if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
    return MRI_NoModRef;

  return MRI_ModRef;
}

/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. A ordered basic block \p OBB can be used to speed up
/// instruction-ordering queries inside the BasicBlock containing \p I.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AliasAnalysis::callCapturesBefore(const Instruction *I,
                                             const MemoryLocation &MemLoc,
                                             DominatorTree *DT,
                                             OrderedBasicBlock *OBB) {
  // Without a dominator tree we cannot reason about ordering; be conservative.
  if (!DT)
    return MRI_ModRef;

  // Only locally-identified, non-global, non-constant objects can be proven
  // unreachable from the call's arguments.
  const Value *Object = GetUnderlyingObject(MemLoc.Ptr, *DL);
  if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
      isa<Constant>(Object))
    return MRI_ModRef;

  ImmutableCallSite CS(I);
  if (!CS.getInstruction() || CS.getInstruction() == Object)
    return MRI_ModRef;

  // If the object may have been captured before the call, the call could
  // reach it through some other pointer; give up.
  if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                       /* StoreCaptures */ true, I, DT,
                                       /* include Object */ true,
                                       /* OrderedBasicBlock */ OBB))
    return MRI_ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = MRI_NoModRef;
  for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments. If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
      continue;

    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking. If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
      continue;
    if (CS.doesNotAccessMemory(ArgNo))
      continue;
    if (CS.onlyReadsMemory(ArgNo)) {
      R = MRI_Ref;
      continue;
    }
    return MRI_ModRef;
  }
  return R;
}

// AliasAnalysis destructor: DO NOT move this to the header file for
// AliasAnalysis or else clients of the AliasAnalysis class may not depend on
// the AliasAnalysis.o file in the current .a file, causing alias analysis
// support to not be included in the tool correctly!
//
AliasAnalysis::~AliasAnalysis() {}

/// InitializeAliasAnalysis - Subclasses must call this method to initialize the
/// AliasAnalysis interface before any other methods are called.
///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P, const DataLayout *NewDL) {
  DL = NewDL;
  // TLI is optional; it is null when no TargetLibraryInfo pass is available.
  auto *TLIP = P->getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  TLI = TLIP ? &TLIP->getTLI() : nullptr;
  // AA is the next (chained) alias analysis implementation.
  AA = &P->getAnalysis<AliasAnalysis>();
}

// getAnalysisUsage - All alias analysis implementations should invoke this
// directly (using AliasAnalysis::getAnalysisUsage(AU)).
void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>(); // All AA's chain
}

/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
421 /// 422 bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB, 423 const MemoryLocation &Loc) { 424 return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod); 425 } 426 427 /// canInstructionRangeModRef - Return true if it is possible for the 428 /// execution of the specified instructions to mod\ref (according to the 429 /// mode) the location Loc. The instructions to consider are all 430 /// of the instructions in the range of [I1,I2] INCLUSIVE. 431 /// I1 and I2 must be in the same basic block. 432 bool AliasAnalysis::canInstructionRangeModRef(const Instruction &I1, 433 const Instruction &I2, 434 const MemoryLocation &Loc, 435 const ModRefInfo Mode) { 436 assert(I1.getParent() == I2.getParent() && 437 "Instructions not in same basic block!"); 438 BasicBlock::const_iterator I = &I1; 439 BasicBlock::const_iterator E = &I2; 440 ++E; // Convert from inclusive to exclusive range. 441 442 for (; I != E; ++I) // Check every instruction in range 443 if (getModRefInfo(I, Loc) & Mode) 444 return true; 445 return false; 446 } 447 448 /// isNoAliasCall - Return true if this pointer is returned by a noalias 449 /// function. 450 bool llvm::isNoAliasCall(const Value *V) { 451 if (auto CS = ImmutableCallSite(V)) 452 return CS.paramHasAttr(0, Attribute::NoAlias); 453 return false; 454 } 455 456 /// isNoAliasArgument - Return true if this is an argument with the noalias 457 /// attribute. 458 bool llvm::isNoAliasArgument(const Value *V) 459 { 460 if (const Argument *A = dyn_cast<Argument>(V)) 461 return A->hasNoAliasAttr(); 462 return false; 463 } 464 465 /// isIdentifiedObject - Return true if this pointer refers to a distinct and 466 /// identifiable object. 
This returns true for: 467 /// Global Variables and Functions (but not Global Aliases) 468 /// Allocas and Mallocs 469 /// ByVal and NoAlias Arguments 470 /// NoAlias returns 471 /// 472 bool llvm::isIdentifiedObject(const Value *V) { 473 if (isa<AllocaInst>(V)) 474 return true; 475 if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V)) 476 return true; 477 if (isNoAliasCall(V)) 478 return true; 479 if (const Argument *A = dyn_cast<Argument>(V)) 480 return A->hasNoAliasAttr() || A->hasByValAttr(); 481 return false; 482 } 483 484 /// isIdentifiedFunctionLocal - Return true if V is umabigously identified 485 /// at the function-level. Different IdentifiedFunctionLocals can't alias. 486 /// Further, an IdentifiedFunctionLocal can not alias with any function 487 /// arguments other than itself, which is not necessarily true for 488 /// IdentifiedObjects. 489 bool llvm::isIdentifiedFunctionLocal(const Value *V) 490 { 491 return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V); 492 } 493