//===- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic AliasAnalysis interface, which is the
// common interface used by all clients and implementations of alias analysis.
//
// This file also implements the default version of the AliasAnalysis interface
// that is to be used when no other implementation is specified. This does some
// simple tests that detect obvious cases: two different global pointers cannot
// alias, a global cannot alias a malloc, two different mallocs cannot alias,
// etc.
//
// This alias analysis implementation really isn't very good for anything, but
// it is very fast, and makes a nice clean default implementation. Because it
// handles lots of little corner cases, other, more complex, alias analysis
// implementations may choose to rely on this pass to resolve these simple and
// easy cases.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
using namespace llvm;

/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation.
static cl::opt<bool> DisableBasicAA("disable-basicaa", cl::Hidden,
                                    cl::init(false));

AAResults::AAResults(AAResults &&Arg)
    : TLI(Arg.TLI), AAs(std::move(Arg.AAs)), AADeps(std::move(Arg.AADeps)) {
  for (auto &AA : AAs)
    AA->setAAResults(this);
}

AAResults::~AAResults() {
  // FIXME: It would be nice to at least clear out the pointers back to this
  // aggregation here, but we end up with non-nesting lifetimes in the legacy
  // pass manager that prevent this from working. In the legacy pass manager
  // we'll end up with dangling references here in some cases.
#if 0
  for (auto &AA : AAs)
    AA->setAAResults(nullptr);
#endif
}

bool AAResults::invalidate(Function &F, const PreservedAnalyses &PA,
                           FunctionAnalysisManager::Invalidator &Inv) {
  // Check if the AA manager itself has been invalidated.
  auto PAC = PA.getChecker<AAManager>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    return true; // The manager needs to be blown away, clear everything.

  // Check all of the dependencies registered.
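  // AADeps holds the IDs of the analyses whose results this aggregation
  // wraps (registered, e.g., by the AAManager as it builds the result). If
  // any of them is invalidated, the result objects we point at may go away,
  // so we must be invalidated as well.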
  for (AnalysisKey *ID : AADeps)
    if (Inv.invalidate(ID, F, PA))
      return true;

  // Everything we depend on is still fine, so are we. Nothing to invalidate.
  return false;
}

//===----------------------------------------------------------------------===//
// Default chaining methods
//===----------------------------------------------------------------------===//

AliasResult AAResults::alias(const MemoryLocation &LocA,
                             const MemoryLocation &LocB) {
  for (const auto &AA : AAs) {
    auto Result = AA->alias(LocA, LocB);
    if (Result != MayAlias)
      return Result;
  }
  return MayAlias;
}

bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
                                       bool OrLocal) {
  for (const auto &AA : AAs)
    if (AA->pointsToConstantMemory(Loc, OrLocal))
      return true;

  return false;
}

ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getArgModRefInfo(CS, ArgIdx));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  return Result;
}

ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
  // We may have two calls.
  if (auto CS = ImmutableCallSite(I)) {
    // Check if the two calls modify the same memory.
    return getModRefInfo(CS, Call);
  } else if (I->isFenceLike()) {
    // If this is a fence, just return MRI_ModRef.
    return MRI_ModRef;
  } else {
    // Otherwise, check if the call modifies or references the
    // location this memory access defines. The best we can say
    // is that if the call references what this instruction
    // defines, it must be clobbered by this location.
    const MemoryLocation DefLoc = MemoryLocation::get(I);
    if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
      return MRI_ModRef;
  }
  return MRI_NoModRef;
}

ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
                                    const MemoryLocation &Loc) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.
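  // (For example, even if every individual AA above answered MRI_ModRef, a
  // call site known to be readonly can still be refined to MRI_Ref here, and
  // one that only accesses its pointer arguments can be refined against each
  // argument's location individually.)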
  auto MRB = getModRefBehavior(CS);
  if (MRB == FMRB_DoesNotAccessMemory ||
      MRB == FMRB_OnlyAccessesInaccessibleMem)
    return MRI_NoModRef;

  if (onlyReadsMemory(MRB))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(MRB))
    Result = ModRefInfo(Result & MRI_Mod);

  if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
    bool DoesAlias = false;
    ModRefInfo AllArgsMask = MRI_NoModRef;
    if (doesAccessArgPointees(MRB)) {
      for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
        const Value *Arg = *AI;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
        MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
        AliasResult ArgAlias = alias(ArgLoc, Loc);
        if (ArgAlias != NoAlias) {
          ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
          DoesAlias = true;
          AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
        }
      }
    }
    if (!DoesAlias)
      return MRI_NoModRef;
    Result = ModRefInfo(Result & AllArgsMask);
  }

  // If Loc is a constant memory location, the call definitely could not
  // modify the memory location.
  if ((Result & MRI_Mod) &&
      pointsToConstantMemory(Loc, /*OrLocal*/ false))
    Result = ModRefInfo(Result & ~MRI_Mod);

  return Result;
}

ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
                                    ImmutableCallSite CS2) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If CS1 or CS2 are readnone, they don't interact.
  auto CS1B = getModRefBehavior(CS1);
  if (CS1B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  auto CS2B = getModRefBehavior(CS2);
  if (CS2B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  // If they both only read from memory, there is no dependence.
  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
    return MRI_NoModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (onlyReadsMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Mod);

  // If CS2 only accesses memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (onlyAccessesArgPointees(CS2B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS2B)) {
      for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
        auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);

        // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence
        // of CS1 on that location is the inverse.
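        // For example, if CS2 might write (Mod) CS2ArgLoc, then CS1 depends
        // on CS2 whenever CS1 reads *or* writes that location, so the
        // dependence is ModRef; if CS2 might only read (Ref) it, only a
        // write by CS1 matters, so the dependence is Mod.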
        ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
        if (ArgMask == MRI_Mod)
          ArgMask = MRI_ModRef;
        else if (ArgMask == MRI_Ref)
          ArgMask = MRI_Mod;

        ArgMask = ModRefInfo(ArgMask & getModRefInfo(CS1, CS2ArgLoc));

        R = ModRefInfo((R | ArgMask) & Result);
        if (R == Result)
          break;
      }
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
  if (onlyAccessesArgPointees(CS1B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS1B)) {
      for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
        auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);

        // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
        // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
        // might Ref, then we care only about a Mod by CS2.
        ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
        ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc);
        if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
             (ArgR & MRI_ModRef) != MRI_NoModRef) ||
            ((ArgMask & MRI_Ref) != MRI_NoModRef &&
             (ArgR & MRI_Mod) != MRI_NoModRef))
          R = ModRefInfo((R | ArgMask) & Result);

        if (R == Result)
          break;
      }
    }
    return R;
  }

  return Result;
}

FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) {
  FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;

  for (const auto &AA : AAs) {
    Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == FMRB_DoesNotAccessMemory)
      return Result;
  }

  return Result;
}

FunctionModRefBehavior AAResults::getModRefBehavior(const Function *F) {
  FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;

  for (const auto &AA : AAs) {
    Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(F));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == FMRB_DoesNotAccessMemory)
      return Result;
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// Helper method implementation
//===----------------------------------------------------------------------===//

ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
                                    const MemoryLocation &Loc) {
  // Be conservative in the face of volatile/atomic.
  if (!L->isUnordered())
    return MRI_ModRef;

  // If the load address doesn't alias the given address, it doesn't read
  // or write the specified memory.
  if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
    return MRI_NoModRef;

  // Otherwise, a load just reads.
  return MRI_Ref;
}

ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
                                    const MemoryLocation &Loc) {
  // Be conservative in the face of volatile/atomic.
  if (!S->isUnordered())
    return MRI_ModRef;

  if (Loc.Ptr) {
    // If the store address cannot alias the pointer in question, then the
    // specified memory cannot be modified by the store.
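    // (A null Loc.Ptr denotes an unknown location; in that case both checks
    // in this block are skipped and we conservatively return MRI_Mod below.)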
    if (!alias(MemoryLocation::get(S), Loc))
      return MRI_NoModRef;

    // If the pointer is a pointer to constant memory, then it could not have
    // been modified by this store.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a store just writes.
  return MRI_Mod;
}

ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
                                    const MemoryLocation &Loc) {
  // If we know that the location is a constant memory location, the fence
  // cannot modify this location.
  if (Loc.Ptr && pointsToConstantMemory(Loc))
    return MRI_Ref;
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
                                    const MemoryLocation &Loc) {
  if (Loc.Ptr) {
    // If the va_arg address cannot alias the pointer in question, then the
    // specified memory cannot be accessed by the va_arg.
    if (!alias(MemoryLocation::get(V), Loc))
      return MRI_NoModRef;

    // If the pointer is a pointer to constant memory, then it could not have
    // been modified by this va_arg.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a va_arg reads and writes.
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
                                    const MemoryLocation &Loc) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to constant memory,
    // then it could not have been modified by this catchpad.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a catchpad reads and writes.
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
                                    const MemoryLocation &Loc) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to constant memory,
    // then it could not have been modified by this catchret.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a catchret reads and writes.
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
                                    const MemoryLocation &Loc) {
  // Acquire/Release cmpxchg has properties that matter for arbitrary
  // addresses.
  if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
    return MRI_ModRef;

  // If the cmpxchg address does not alias the location, it does not access it.
  if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
    return MRI_NoModRef;

  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
                                    const MemoryLocation &Loc) {
  // Acquire/Release atomicrmw has properties that matter for arbitrary
  // addresses.
  if (isStrongerThanMonotonic(RMW->getOrdering()))
    return MRI_ModRef;

  // If the atomicrmw address does not alias the location, it does not access
  // it.
  if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
    return MRI_NoModRef;

  return MRI_ModRef;
}

/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction-ordering queries inside the BasicBlock containing \p I.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are.
/// However, with a smarter AA in place, this test is just wasting compile
/// time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         OrderedBasicBlock *OBB) {
  if (!DT)
    return MRI_ModRef;

  const Value *Object =
      GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
  if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
      isa<Constant>(Object))
    return MRI_ModRef;

  ImmutableCallSite CS(I);
  if (!CS.getInstruction() || CS.getInstruction() == Object)
    return MRI_ModRef;

  if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                       /* StoreCaptures */ true, I, DT,
                                       /* include Object */ true,
                                       /* OrderedBasicBlock */ OBB))
    return MRI_ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = MRI_NoModRef;
  for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments. If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!CS.doesNotCapture(ArgNo) &&
         ArgNo < CS.getNumArgOperands() && !CS.isByValArgument(ArgNo)))
      continue;

    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking. If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
      continue;
    if (CS.doesNotAccessMemory(ArgNo))
      continue;
    if (CS.onlyReadsMemory(ArgNo)) {
      R = MRI_Ref;
      continue;
    }
    return MRI_ModRef;
  }
  return R;
}

/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
                                    const MemoryLocation &Loc) {
  return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
}

/// canInstructionRangeModRef - Return true if it is possible for the
/// execution of the specified instructions to mod/ref (according to the
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
/// I1 and I2 must be in the same basic block.
bool AAResults::canInstructionRangeModRef(const Instruction &I1,
                                          const Instruction &I2,
                                          const MemoryLocation &Loc,
                                          const ModRefInfo Mode) {
  assert(I1.getParent() == I2.getParent() &&
         "Instructions not in same basic block!");
  BasicBlock::const_iterator I = I1.getIterator();
  BasicBlock::const_iterator E = I2.getIterator();
  ++E; // Convert from inclusive to exclusive range.

  for (; I != E; ++I) // Check every instruction in range
    if (getModRefInfo(&*I, Loc) & Mode)
      return true;
  return false;
}

// Provide a definition for the root virtual destructor.
AAResults::Concept::~Concept() {}

// Provide a definition for the static object used to identify passes.
AnalysisKey AAManager::Key;

namespace {
/// A wrapper pass for external alias analyses. This just squirrels away the
/// callback used to run any analyses and register their results.
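///
/// For illustration only: a client might install its callback via
/// createExternalAAWrapperPass (defined below), where MyAAWrapperPass is a
/// hypothetical wrapper around some out-of-tree alias analysis:
///
///   PM.add(createExternalAAWrapperPass(
///       [](Pass &P, Function &F, AAResults &AAR) {
///         if (auto *WrapperPass = P.getAnalysisIfAvailable<MyAAWrapperPass>())
///           AAR.addAAResult(WrapperPass->getResult());
///       }));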
struct ExternalAAWrapperPass : ImmutablePass {
  typedef std::function<void(Pass &, Function &, AAResults &)> CallbackT;

  CallbackT CB;

  static char ID;

  ExternalAAWrapperPass() : ImmutablePass(ID) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }
  explicit ExternalAAWrapperPass(CallbackT CB)
      : ImmutablePass(ID), CB(std::move(CB)) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};
}

char ExternalAAWrapperPass::ID = 0;
INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)

ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}

AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
}

char AAResultsWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLAndersAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLSteensAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)

FunctionPass *llvm::createAAResultsWrapperPass() {
  return new AAResultsWrapperPass();
}

/// Run the wrapper pass to rebuild an aggregation over known AA passes.
///
/// This is the legacy pass manager's interface to the new-style AA results
/// aggregation object. Because this is somewhat shoe-horned into the legacy
/// pass manager, we hard code all the specific alias analyses available into
/// it. While the particular set enabled is configured via commandline flags,
/// adding a new alias analysis to LLVM will require adding support for it to
/// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(
      new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI()));

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA)
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());

  // Populate the results with the currently available AAs.
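  // Each result is added only if its wrapper pass was actually scheduled in
  // this pipeline; getAnalysisIfAvailable returns null otherwise, so absent
  // analyses are silently skipped.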
  if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLAndersAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLSteensAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());

  // If available, run an external AA providing callback over the results as
  // well.
  if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(*this, F, *AAR);

  // Analyses don't mutate the IR, so return false.
  return false;
}

void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<BasicAAWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}

AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
                                        BasicAAResult &BAR) {
  AAResults AAR(P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());

  // Add in our explicitly constructed BasicAA results.
  if (!DisableBasicAA)
    AAR.addAAResult(BAR);

  // Populate the results with the other currently available AAs.
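  // This list mirrors AAResultsWrapperPass::runOnFunction above, except that
  // SCEVAA is not probed here; getAAResultsAnalysisUsage below must be kept
  // in sync with this set.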
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLAndersAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLSteensAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());

  return AAR;
}

bool llvm::isNoAliasCall(const Value *V) {
  if (auto CS = ImmutableCallSite(V))
    return CS.paramHasAttr(0, Attribute::NoAlias);
  return false;
}

bool llvm::isNoAliasArgument(const Value *V) {
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasNoAliasAttr();
  return false;
}

bool llvm::isIdentifiedObject(const Value *V) {
  if (isa<AllocaInst>(V))
    return true;
  if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
    return true;
  if (isNoAliasCall(V))
    return true;
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasNoAliasAttr() || A->hasByValAttr();
  return false;
}

bool llvm::isIdentifiedFunctionLocal(const Value *V) {
  return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V);
}

void llvm::getAAResultsAnalysisUsage(AnalysisUsage &AU) {
  // This function needs to be in sync with llvm::createLegacyPMAAResults -- if
  // more alias analyses are added to llvm::createLegacyPMAAResults, they need
  // to be added here also.
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}