//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"

#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif

#include <cassert>
#include <string>

using namespace llvm;

#define DEBUG_TYPE "attributor"

DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
              "Determine what attributes are manifested in the IR");

STATISTIC(NumFnDeleted, "Number of functions deleted");
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    SetFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));

static cl::opt<unsigned, true> MaxInitializationChainLengthX(
    "attributor-max-initialization-chain-length", cl::Hidden,
    cl::desc(
        "Maximal number of chained initializations (to avoid stack overflows)"),
    cl::location(MaxInitializationChainLength), cl::init(1024));
unsigned llvm::MaxInitializationChainLength;

static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<bool>
    AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
                         cl::desc("Allow the Attributor to create shallow "
                                  "wrappers for non-exact definitions."),
                         cl::init(false));

static cl::opt<bool>
    AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
                     cl::desc("Allow the Attributor to use IP information "
                              "derived from non-exact functions via cloning"),
                     cl::init(false));

// These options can only be used for debug builds.
#ifndef NDEBUG
static cl::list<std::string>
    SeedAllowList("attributor-seed-allow-list", cl::Hidden,
                  cl::desc("Comma separated list of attribute names that are "
                           "allowed to be seeded."),
                  cl::ZeroOrMore, cl::CommaSeparated);

static cl::list<std::string> FunctionSeedAllowList(
    "attributor-function-seed-allow-list", cl::Hidden,
    cl::desc("Comma separated list of function names that are "
             "allowed to be seeded."),
    cl::ZeroOrMore, cl::CommaSeparated);
#endif

static cl::opt<bool>
    DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
                 cl::desc("Dump the dependency graph to dot files."),
                 cl::init(false));

static cl::opt<std::string> DepGraphDotFileNamePrefix(
    "attributor-depgraph-dot-filename-prefix", cl::Hidden,
    cl::desc("The prefix used for the dependency graph dot file names."));

static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
                                  cl::desc("View the dependency graph."),
                                  cl::init(false));

static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
                                       cl::desc("Print attribute dependencies"),
                                       cl::init(false));

static cl::opt<bool> EnableCallSiteSpecific(
    "attributor-enable-call-site-specific-deduction", cl::Hidden,
    cl::desc("Allow the Attributor to do call site specific analysis"),
    cl::init(false));

static cl::opt<bool>
    PrintCallGraph("attributor-print-call-graph", cl::Hidden,
                   cl::desc("Print Attributor's internal call graph"),
                   cl::init(false));

static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
                                      cl::Hidden,
                                      cl::desc("Try to simplify all loads."),
                                      cl::init(true));

/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::CHANGED ? L : R;
}
ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
  L = L | R;
  return L;
}
ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::UNCHANGED ? L : R;
}
ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
  L = L & R;
  return L;
}
///}
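/// Return true if \p I is assumed to be free of synchronization: nosync
/// calls, instructions that do not access memory, and non-volatile memory
/// operations that are not non-relaxed atomics.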
bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
                      const AbstractAttribute &QueryingAA) {
  // We are looking for volatile instructions or non-relaxed atomics.
  if (const auto *CB = dyn_cast<CallBase>(&I)) {
    if (CB->hasFnAttr(Attribute::NoSync))
      return true;

    // Non-convergent and readnone imply nosync.
    if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
      return true;

    if (AANoSync::isNoSyncIntrinsic(&I))
      return true;

    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        QueryingAA, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
    return NoSyncAA.isAssumedNoSync();
  }

  if (!I.mayReadOrWriteMemory())
    return true;

  return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
}

bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
                             const Value &V, bool ForAnalysisOnly) {
  // TODO: See the AAInstanceInfo class comment.
  if (!ForAnalysisOnly)
    return false;
  auto &InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
      QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
  return InstanceInfoAA.isAssumedUniqueForAnalysis();
}

Constant *AA::getInitialValueForObj(Value &Obj, Type &Ty,
                                    const TargetLibraryInfo *TLI) {
  if (isa<AllocaInst>(Obj))
    return UndefValue::get(&Ty);
  if (isAllocationFn(&Obj, TLI))
    return getInitialValueOfAllocation(&cast<CallBase>(Obj), TLI, &Ty);
  auto *GV = dyn_cast<GlobalVariable>(&Obj);
  if (!GV || !GV->hasLocalLinkage())
    return nullptr;
  if (!GV->hasInitializer())
    return UndefValue::get(&Ty);
  return dyn_cast_or_null<Constant>(getWithType(*GV->getInitializer(), Ty));
}

bool AA::isValidInScope(const Value &V, const Function *Scope) {
  if (isa<Constant>(V))
    return true;
  if (auto *I = dyn_cast<Instruction>(&V))
    return I->getFunction() == Scope;
  if (auto *A = dyn_cast<Argument>(&V))
    return A->getParent() == Scope;
  return false;
}
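/// Return true if \p V is available at the program point \p CtxI, i.e., it is
/// a constant, an argument of the surrounding function, or an instruction in
/// that function that dominates \p CtxI.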
bool AA::isValidAtPosition(const Value &V, const Instruction &CtxI,
                           InformationCache &InfoCache) {
  if (isa<Constant>(V) || &V == &CtxI)
    return true;
  const Function *Scope = CtxI.getFunction();
  if (auto *A = dyn_cast<Argument>(&V))
    return A->getParent() == Scope;
  if (auto *I = dyn_cast<Instruction>(&V)) {
    if (I->getFunction() == Scope) {
      if (const DominatorTree *DT =
              InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  *Scope))
        return DT->dominates(I, &CtxI);
      // Local dominance check mostly for the old PM passes.
      if (I->getParent() == CtxI.getParent())
        return llvm::any_of(
            make_range(I->getIterator(), I->getParent()->end()),
            [&](const Instruction &AfterI) { return &AfterI == &CtxI; });
    }
  }
  return false;
}

Value *AA::getWithType(Value &V, Type &Ty) {
  if (V.getType() == &Ty)
    return &V;
  if (isa<PoisonValue>(V))
    return PoisonValue::get(&Ty);
  if (isa<UndefValue>(V))
    return UndefValue::get(&Ty);
  if (auto *C = dyn_cast<Constant>(&V)) {
    if (C->isNullValue())
      return Constant::getNullValue(&Ty);
    if (C->getType()->isPointerTy() && Ty.isPointerTy())
      return ConstantExpr::getPointerCast(C, &Ty);
    if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
      if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
        return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
      if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
        return ConstantExpr::getFPTrunc(C, &Ty, /* OnlyIfReduced */ true);
    }
  }
  return nullptr;
}

Optional<Value *>
AA::combineOptionalValuesInAAValueLatice(const Optional<Value *> &A,
                                         const Optional<Value *> &B, Type *Ty) {
  if (A == B)
    return A;
  if (!B.hasValue())
    return A;
  if (*B == nullptr)
    return nullptr;
  if (!A.hasValue())
    return Ty ? getWithType(**B, *Ty) : nullptr;
  if (*A == nullptr)
    return nullptr;
  if (!Ty)
    Ty = (*A)->getType();
  if (isa_and_nonnull<UndefValue>(*A))
    return getWithType(**B, *Ty);
  if (isa<UndefValue>(*B))
    return A;
  if (*A && *B && *A == getWithType(**B, *Ty))
    return A;
  return nullptr;
}
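/// Collect the potential copies of the value read (\p IsLoad) or stored
/// (!\p IsLoad) by the memory instruction \p I. If not all copies can be
/// identified, return false without modifying \p PotentialCopies.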
template <bool IsLoad, typename Ty>
static bool getPotentialCopiesOfMemoryValue(
    Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
                    << " (only exact: " << OnlyExact << ")\n";);

  Value &Ptr = *I.getPointerOperand();
  SmallVector<Value *, 8> Objects;
  if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, QueryingAA, &I,
                                       UsedAssumedInformation)) {
    LLVM_DEBUG(
        dbgs() << "Underlying objects stored into could not be determined\n";);
    return false;
  }

  // Containers to remember the pointer infos and new copies while we are not
  // sure that we can find all of them. If we abort we want to avoid spurious
  // dependences and potential copies in the provided container.
  SmallVector<const AAPointerInfo *> PIs;
  SmallVector<Value *> NewCopies;
  SmallVector<Instruction *> NewCopyOrigins;

  const auto *TLI =
      A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
  for (Value *Obj : Objects) {
    LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
    if (isa<UndefValue>(Obj))
      continue;
    if (isa<ConstantPointerNull>(Obj)) {
      // A null pointer access can be undefined but any offset from null may
      // be OK. We do not try to optimize the latter.
      if (!NullPointerIsDefined(I.getFunction(),
                                Ptr.getType()->getPointerAddressSpace()) &&
          A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation) ==
              Obj)
        continue;
      LLVM_DEBUG(
          dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
      return false;
    }
    // TODO: Use assumed noalias return.
    if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj) &&
        !(IsLoad ? isAllocationFn(Obj, TLI) : isNoAliasCall(Obj))) {
      LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << *Obj
                        << "\n";);
      return false;
    }
    if (auto *GV = dyn_cast<GlobalVariable>(Obj))
      if (!GV->hasLocalLinkage()) {
        LLVM_DEBUG(dbgs() << "Underlying object is global with external "
                             "linkage, not supported yet: "
                          << *Obj << "\n";);
        return false;
      }

    if (IsLoad) {
      Value *InitialValue = AA::getInitialValueForObj(*Obj, *I.getType(), TLI);
      if (!InitialValue)
        return false;
      NewCopies.push_back(InitialValue);
      NewCopyOrigins.push_back(nullptr);
    }

    auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
      if ((IsLoad && !Acc.isWrite()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad && Acc.isWrittenValueYetUndetermined())
        return true;
      if (OnlyExact && !IsExact &&
          !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
        LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
                          << ", abort!\n");
        return false;
      }
      if (IsLoad) {
        assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
        if (!Acc.isWrittenValueUnknown()) {
          NewCopies.push_back(Acc.getWrittenValue());
          NewCopyOrigins.push_back(Acc.getRemoteInst());
          return true;
        }
        auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
        if (!SI) {
          LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.push_back(SI->getValueOperand());
        NewCopyOrigins.push_back(SI);
      } else {
        assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
        auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
        if (!LI && OnlyExact) {
          LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.push_back(Acc.getRemoteInst());
      }
      return true;
    };

    auto &PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(*Obj),
                                         DepClassTy::NONE);
    if (!PI.forallInterferingAccesses(A, QueryingAA, I, CheckAccess)) {
      LLVM_DEBUG(
          dbgs()
          << "Failed to verify all interfering accesses for underlying object: "
          << *Obj << "\n");
      return false;
    }
    PIs.push_back(&PI);
  }

  // Only if we were successful collecting all potential copies do we record
  // dependences (on non-fixpoint AAPointerInfo AAs). We also only then modify
  // the given PotentialCopies container.
  for (auto *PI : PIs) {
    if (!PI->getState().isAtFixpoint())
      UsedAssumedInformation = true;
    A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
  }
  PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
  PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());

  return true;
}

bool AA::getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
      A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}

bool AA::getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  SmallSetVector<Instruction *, 4> PotentialValueOrigins;
  return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
      A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}
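/// Return true if \p IRP is assumed read-none, or at least read-only if
/// \p RequireReadNone is false. Set \p IsKnown if the property holds with
/// known, not just assumed, information.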
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
                                        const AbstractAttribute &QueryingAA,
                                        bool RequireReadNone, bool &IsKnown) {

  IRPosition::Kind Kind = IRP.getPositionKind();
  if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
    const auto &MemLocAA =
        A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
    if (MemLocAA.isAssumedReadNone()) {
      IsKnown = MemLocAA.isKnownReadNone();
      if (!IsKnown)
        A.recordDependence(MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
      return true;
    }
  }

  const auto &MemBehaviorAA =
      A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
  if (MemBehaviorAA.isAssumedReadNone() ||
      (!RequireReadNone && MemBehaviorAA.isAssumedReadOnly())) {
    IsKnown = RequireReadNone ? MemBehaviorAA.isKnownReadNone()
                              : MemBehaviorAA.isKnownReadOnly();
    if (!IsKnown)
      A.recordDependence(MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
    return true;
  }

  return false;
}

bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ false, IsKnown);
}
bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ true, IsKnown);
}
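/// Determine if \p FromI can potentially reach \p ToI, or \p ToFn if \p ToI
/// is null, following control flow forward and, where \p GoBackwardsCB
/// permits it, backwards through the call sites of the surrounding function.
/// This is conservative: it returns true whenever reachability cannot be
/// ruled out.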
static bool
isPotentiallyReachable(Attributor &A, const Instruction &FromI,
                       const Instruction *ToI, const Function &ToFn,
                       const AbstractAttribute &QueryingAA,
                       std::function<bool(const Function &F)> GoBackwardsCB) {
  LLVM_DEBUG(dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName()
                    << " from " << FromI << " [GBCB: " << bool(GoBackwardsCB)
                    << "]\n");

  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *> Worklist;
  Worklist.push_back(&FromI);

  const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
      QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
  while (!Worklist.empty()) {
    const Instruction *CurFromI = Worklist.pop_back_val();
    if (!Visited.insert(CurFromI).second)
      continue;

    const Function *FromFn = CurFromI->getFunction();
    if (FromFn == &ToFn) {
      if (!ToI)
        return true;
      LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
                        << " intraprocedurally\n");
      const auto &ReachabilityAA = A.getAAFor<AAReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      bool Result = ReachabilityAA.isAssumedReachable(A, *CurFromI, *ToI);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << *ToI << " [Intra]\n");
      if (Result)
        return true;
      if (NoRecurseAA.isAssumedNoRecurse())
        continue;
    }

    // TODO: If we can go arbitrarily backwards we will eventually reach an
    // entry point that can reach ToI. Only once this takes a set of blocks
    // through which we cannot go, or once we track internal functions not
    // accessible from the outside, it makes sense to perform backwards
    // analysis in the absence of a GoBackwardsCB.
    if (!GoBackwardsCB) {
      LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from "
                        << *CurFromI << " is not checked backwards, abort\n");
      return true;
    }

    // Check if the current instruction is already known to reach the ToFn.
    const auto &FnReachabilityAA = A.getAAFor<AAFunctionReachability>(
        QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
    bool Result = FnReachabilityAA.instructionCanReach(
        A, *CurFromI, ToFn, /* UseBackwards */ false);
    LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
                      << " " << (Result ? "can potentially " : "cannot ")
                      << "reach @" << ToFn.getName() << " [FromFn]\n");
    if (Result)
      return true;

    // If we do not go backwards from the FromFn we are done here and so far we
    // could not find a way to reach ToFn/ToI.
    if (!GoBackwardsCB(*FromFn))
      continue;

    LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
                      << FromFn->getName() << "\n");

    auto CheckCallSite = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      if (!CB)
        return false;

      if (isa<InvokeInst>(CB))
        return false;

      Instruction *Inst = CB->getNextNonDebugInstruction();
      Worklist.push_back(Inst);
      return true;
    };

    bool UsedAssumedInformation = false;
    Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
                                     /* RequireAllCallSites */ true,
                                     &QueryingAA, UsedAssumedInformation);
    if (Result) {
      LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
                        << " in @" << FromFn->getName()
                        << " failed, give up\n");
      return true;
    }

    LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
                      << " in @" << FromFn->getName()
                      << " worklist size is: " << Worklist.size() << "\n");
  }
  return false;
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
    const AbstractAttribute &QueryingAA,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  LLVM_DEBUG(dbgs() << "[AA] isPotentiallyReachable " << ToI << " from "
                    << FromI << " [GBCB: " << bool(GoBackwardsCB) << "]\n");
  const Function *ToFn = ToI.getFunction();
  return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
                                  GoBackwardsCB);
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
                                  GoBackwardsCB);
}
/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}

/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position described by \p PK and \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx,
                             bool ForceReplace = false) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
      if (!ForceReplace &&
          isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
      if (!ForceReplace &&
          isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
      if (!ForceReplace &&
          isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.removeAttributeAtIndex(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}

Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getCallSiteArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  Optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CallbackUses;
  const auto &CB = cast<CallBase>(getAnchorValue());
  AbstractCallSite::getCallbackUses(CB, CallbackUses);
  for (const Use *U : CallbackUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      if (CBCandidateArg.hasValue()) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg.hasValue() && CBCandidateArg.getValue())
    return CBCandidateArg.getValue();

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  const Function *Callee = CB.getCalledFunction();
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}
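/// Manifest the attributes in \p DeducedAttrs into the IR at position \p IRP.
/// An existing attribute is only replaced if \p ForceReplace is set or the
/// deduced one is an improvement (see isEqualOrWorse).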
ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs,
                                   bool ForceReplace) {
  Function *ScopeFn = IRP.getAnchorScope();
  IRPosition::Kind PK = IRP.getPositionKind();

  // What follows is generic code to manifest the attributes in DeducedAttrs
  // if they improve the current IR. Due to the different annotation positions
  // we use the underlying AttributeList interface.

  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = cast<CallBase>(IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx(), ForceReplace))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    cast<CallBase>(IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}

const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
const IRPosition
    IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());
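/// Populate IRPositions with the positions subsuming \p IRP, starting with
/// \p IRP itself. For call site positions this includes the corresponding
/// callee positions, but only if the operand bundles on the call site can be
/// ignored.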
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  // Helper to determine if operand bundles on a call site are benign or
  // potentially problematic. We handle only llvm.assume for now.
  auto CanIgnoreOperandBundles = [](const CallBase &CB) {
    return (isa<IntrinsicInst>(CB) &&
            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
  };

  const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
      if (const Function *Callee = CB->getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      if (const Function *Callee = CB->getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
        for (const Argument &Arg : Callee->args())
          if (Arg.hasReturnedAttr()) {
            IRPositions.emplace_back(
                IRPosition::callsite_argument(*CB, Arg.getArgNo()));
            IRPositions.emplace_back(
                IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
            IRPositions.emplace_back(IRPosition::argument(Arg));
          }
      }
    }
    IRPositions.emplace_back(IRPosition::callsite_function(*CB));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      const Function *Callee = CB->getCalledFunction();
      if (Callee) {
        if (Argument *Arg = IRP.getAssociatedArgument())
          IRPositions.emplace_back(IRPosition::argument(*Arg));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}

bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
                         bool IgnoreSubsumingPositions, Attributor *A) const {
  SmallVector<Attribute, 4> Attrs;
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs)
      if (EquivIRP.getAttrsFromIRAttr(AK, Attrs))
        return true;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  if (A)
    for (Attribute::AttrKind AK : AKs)
      if (getAttrsFromAssumes(AK, Attrs, *A))
        return true;
  return false;
}
void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions, Attributor *A) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs)
      EquivIRP.getAttrsFromIRAttr(AK, Attrs);
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  if (A)
    for (Attribute::AttrKind AK : AKs)
      getAttrsFromAssumes(AK, Attrs, *A);
}

bool IRPosition::getAttrsFromIRAttr(Attribute::AttrKind AK,
                                    SmallVectorImpl<Attribute> &Attrs) const {
  if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
    return false;

  AttributeList AttrList;
  if (const auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
    AttrList = CB->getAttributes();
  else
    AttrList = getAssociatedFunction()->getAttributes();

  bool HasAttr = AttrList.hasAttributeAtIndex(getAttrIdx(), AK);
  if (HasAttr)
    Attrs.push_back(AttrList.getAttributeAtIndex(getAttrIdx(), AK));
  return HasAttr;
}

bool IRPosition::getAttrsFromAssumes(Attribute::AttrKind AK,
                                     SmallVectorImpl<Attribute> &Attrs,
                                     Attributor &A) const {
  assert(getPositionKind() != IRP_INVALID && "Expected a valid position!");
  Value &AssociatedValue = getAssociatedValue();

  const Assume2KnowledgeMap &A2K =
      A.getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});

  // Check if we found any potential assume use; if not, we don't need to
  // create explorer iterators.
  if (A2K.empty())
    return false;

  LLVMContext &Ctx = AssociatedValue.getContext();
  unsigned AttrsSize = Attrs.size();
  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();
  auto EIt = Explorer.begin(getCtxI()), EEnd = Explorer.end(getCtxI());
  for (auto &It : A2K)
    if (Explorer.findInContextOf(It.first, EIt, EEnd))
      Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
  return AttrsSize != Attrs.size();
}
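/// Check the internal invariants of the position encoding. This is a no-op
/// unless EXPENSIVE_CHECKS is enabled.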
void IRPosition::verify() {
#ifdef EXPENSIVE_CHECKS
  switch (getPositionKind()) {
  case IRP_INVALID:
    assert((CBContext == nullptr) &&
           "Invalid position must not have CallBaseContext!");
    assert(!Enc.getOpaqueValue() &&
           "Expected a nullptr for an invalid position!");
    return;
  case IRP_FLOAT:
    assert((!isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for argument values!");
    return;
  case IRP_RETURNED:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_RETURNED:
    assert((CBContext == nullptr) &&
           "'call site returned' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE:
    assert((CBContext == nullptr) &&
           "'call site function' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_FUNCTION:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_ARGUMENT:
    assert(isa<Argument>(getAsValuePtr()) &&
           "Expected argument for an 'argument' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_ARGUMENT: {
    assert((CBContext == nullptr) &&
           "'call site argument' position must not have CallBaseContext!");
    Use *U = getAsUsePtr();
    (void)U; // Silence unused variable warning.
    assert(U && "Expected use for a 'call site argument' position!");
    assert(isa<CallBase>(U->getUser()) &&
           "Expected call base user for a 'call site argument' position!");
    assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
           "Expected call base argument operand for a 'call site argument' "
           "position");
    assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
               unsigned(getCallSiteArgNo()) &&
           "Argument number mismatch!");
    assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
    return;
  }
  }
#endif
}

Optional<Constant *>
Attributor::getAssumedConstant(const IRPosition &IRP,
                               const AbstractAttribute &AA,
                               bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or None, we
  // assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
    Optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
    if (!SimplifiedV.hasValue())
      return llvm::None;
    if (isa_and_nonnull<Constant>(*SimplifiedV))
      return cast<Constant>(*SimplifiedV);
    return nullptr;
  }
  const auto &ValueSimplifyAA =
      getAAFor<AAValueSimplify>(AA, IRP, DepClassTy::NONE);
  Optional<Value *> SimplifiedV =
      ValueSimplifyAA.getAssumedSimplifiedValue(*this);
  bool IsKnown = ValueSimplifyAA.isAtFixpoint();
  UsedAssumedInformation |= !IsKnown;
  if (!SimplifiedV.hasValue()) {
    recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
    return llvm::None;
  }
  if (isa_and_nonnull<UndefValue>(SimplifiedV.getValue())) {
    recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
    return UndefValue::get(IRP.getAssociatedType());
  }
  Constant *CI = dyn_cast_or_null<Constant>(SimplifiedV.getValue());
  if (CI)
    CI = dyn_cast_or_null<Constant>(
        AA::getWithType(*CI, *IRP.getAssociatedType()));
  if (CI)
    recordDependence(ValueSimplifyAA, AA, DepClassTy::OPTIONAL);
  return CI;
}
Optional<Value *>
Attributor::getAssumedSimplified(const IRPosition &IRP,
                                 const AbstractAttribute *AA,
                                 bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or None, we
  // assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP))
    return CB(IRP, AA, UsedAssumedInformation);

  // If no high-level/outside simplification occurred, use AAValueSimplify.
  const auto &ValueSimplifyAA =
      getOrCreateAAFor<AAValueSimplify>(IRP, AA, DepClassTy::NONE);
  Optional<Value *> SimplifiedV =
      ValueSimplifyAA.getAssumedSimplifiedValue(*this);
  bool IsKnown = ValueSimplifyAA.isAtFixpoint();
  UsedAssumedInformation |= !IsKnown;
  if (!SimplifiedV.hasValue()) {
    if (AA)
      recordDependence(ValueSimplifyAA, *AA, DepClassTy::OPTIONAL);
    return llvm::None;
  }
  if (*SimplifiedV == nullptr)
    return const_cast<Value *>(&IRP.getAssociatedValue());
  if (Value *SimpleV =
          AA::getWithType(**SimplifiedV, *IRP.getAssociatedType())) {
    if (AA)
      recordDependence(ValueSimplifyAA, *AA, DepClassTy::OPTIONAL);
    return SimpleV;
  }
  return const_cast<Value *>(&IRP.getAssociatedValue());
}

Optional<Value *> Attributor::translateArgumentToCallSiteContent(
    Optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
    bool &UsedAssumedInformation) {
  if (!V.hasValue())
    return V;
  if (*V == nullptr || isa<Constant>(*V))
    return V;
  if (auto *Arg = dyn_cast<Argument>(*V))
    if (CB.getCalledFunction() == Arg->getParent())
      if (!Arg->hasPointeeInMemoryValueAttr())
        return getAssumedSimplified(
            IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
            UsedAssumedInformation);
  return nullptr;
}

Attributor::~Attributor() {
  // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
  // thus we cannot delete them. We can, and want to, destruct them though.
  for (auto &DepAA : DG.SyntheticRoot.Deps) {
    AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
    AA->~AbstractAttribute();
  }
}
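// The isAssumedDead overloads below query liveness information (AAIsDead)
// for different IR entities: abstract attributes, uses, instructions,
// positions, and basic blocks. They set \p UsedAssumedInformation whenever
// the verdict rests on assumed rather than known liveness.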
bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  const IRPosition &IRP = AA.getIRPosition();
  if (!Functions.count(IRP.getAnchorScope()))
    return false;
  return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
                       CheckBBLivenessOnly, DepClass);
}

bool Attributor::isAssumedDead(const Use &U,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  Instruction *UserI = dyn_cast<Instruction>(U.getUser());
  if (!UserI)
    return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);

  if (auto *CB = dyn_cast<CallBase>(UserI)) {
    // For call site argument uses we can check if the argument is
    // unused/dead.
    if (CB->isArgOperand(&U)) {
      const IRPosition &CSArgPos =
          IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
      return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
                           UsedAssumedInformation, CheckBBLivenessOnly,
                           DepClass);
    }
  } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
    const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
    return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
    BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
    return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
    if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
      const IRPosition IRP = IRPosition::inst(*SI);
      const AAIsDead &IsDeadAA =
          getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
      if (IsDeadAA.isRemovableStore()) {
        if (QueryingAA)
          recordDependence(IsDeadAA, *QueryingAA, DepClass);
        if (!IsDeadAA.isKnown(AAIsDead::IS_REMOVABLE))
          UsedAssumedInformation = true;
        return true;
      }
    }
  }

  return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
                       UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
}

bool Attributor::isAssumedDead(const Instruction &I,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  const IRPosition::CallBaseContext *CBCtx =
      QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;

  if (ManifestAddedBlocks.contains(I.getParent()))
    return false;

  if (!FnLivenessAA)
    FnLivenessAA =
        lookupAAFor<AAIsDead>(IRPosition::function(*I.getFunction(), CBCtx),
                              QueryingAA, DepClassTy::NONE);

  // If we have a context instruction and a liveness AA we use it.
  if (FnLivenessAA &&
      FnLivenessAA->getIRPosition().getAnchorScope() == I.getFunction() &&
      (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
                           : FnLivenessAA->isAssumedDead(&I))) {
    if (QueryingAA)
      recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
    if (!FnLivenessAA->isKnownDead(&I))
      UsedAssumedInformation = true;
    return true;
  }

  if (CheckBBLivenessOnly)
    return false;

  const IRPosition IRP = IRPosition::inst(I, CBCtx);
  const AAIsDead &IsDeadAA =
      getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
  // Don't check liveness for AAIsDead.
  if (QueryingAA == &IsDeadAA)
    return false;

  if (IsDeadAA.isAssumedDead()) {
    if (QueryingAA)
      recordDependence(IsDeadAA, *QueryingAA, DepClass);
    if (!IsDeadAA.isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }

  return false;
}
bool Attributor::isAssumedDead(const IRPosition &IRP,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  Instruction *CtxI = IRP.getCtxI();
  if (CtxI &&
      isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
                    /* CheckBBLivenessOnly */ true,
                    CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
    return true;

  if (CheckBBLivenessOnly)
    return false;

  // If we haven't succeeded we query the specific liveness info for the IRP.
  const AAIsDead *IsDeadAA;
  if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
    IsDeadAA = &getOrCreateAAFor<AAIsDead>(
        IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
        QueryingAA, DepClassTy::NONE);
  else
    IsDeadAA = &getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
  // Don't check liveness for AAIsDead.
  if (QueryingAA == IsDeadAA)
    return false;

  if (IsDeadAA->isAssumedDead()) {
    if (QueryingAA)
      recordDependence(*IsDeadAA, *QueryingAA, DepClass);
    if (!IsDeadAA->isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }

  return false;
}

bool Attributor::isAssumedDead(const BasicBlock &BB,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               DepClassTy DepClass) {
  if (!FnLivenessAA)
    FnLivenessAA = lookupAAFor<AAIsDead>(IRPosition::function(*BB.getParent()),
                                         QueryingAA, DepClassTy::NONE);
  if (FnLivenessAA->isAssumedDead(&BB)) {
    if (QueryingAA)
      recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
    return true;
  }

  return false;
}
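// checkForAllUses visits all (transitive) uses of a value with a worklist.
// Uses that are assumed dead or droppable are skipped, and stored values are
// followed into their potential copies so the predicate also sees the uses of
// those copies.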
bool Attributor::checkForAllUses(
    function_ref<bool(const Use &, bool &)> Pred,
    const AbstractAttribute &QueryingAA, const Value &V,
    bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
    bool IgnoreDroppableUses,
    function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {

  // Check the trivial case first as it catches void values.
  if (V.use_empty())
    return true;

  const IRPosition &IRP = QueryingAA.getIRPosition();
  SmallVector<const Use *, 16> Worklist;
  SmallPtrSet<const Use *, 16> Visited;

  for (const Use &U : V.uses())
    Worklist.push_back(&U);

  LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
                    << " initial uses to check\n");

  const Function *ScopeFn = IRP.getAnchorScope();
  const auto *LivenessAA =
      ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
                                    DepClassTy::NONE)
              : nullptr;

  while (!Worklist.empty()) {
    const Use *U = Worklist.pop_back_val();
    if (isa<PHINode>(U->getUser()) && !Visited.insert(U).second)
      continue;
    LLVM_DEBUG({
      if (auto *Fn = dyn_cast<Function>(U->getUser()))
        dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
               << "\n";
      else
        dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
               << "\n";
    });
    bool UsedAssumedInformation = false;
    if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
                      CheckBBLivenessOnly, LivenessDepClass)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
      continue;
    }
    if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
      LLVM_DEBUG(dbgs() << "[Attributor] Droppable user, skip!\n");
      continue;
    }

    if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
      if (&SI->getOperandUse(0) == U) {
        if (!Visited.insert(U).second)
          continue;
        SmallSetVector<Value *, 4> PotentialCopies;
        if (AA::getPotentialCopiesOfStoredValue(
                *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
                /* OnlyExact */ true)) {
          LLVM_DEBUG(dbgs() << "[Attributor] Value is stored, continue with "
                            << PotentialCopies.size()
                            << " potential copies instead!\n");
          for (Value *PotentialCopy : PotentialCopies)
            for (const Use &CopyUse : PotentialCopy->uses()) {
              if (EquivalentUseCB && !EquivalentUseCB(*U, CopyUse)) {
                LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
                                     "rejected by the equivalence call back: "
                                  << *CopyUse << "!\n");
                return false;
              }
              Worklist.push_back(&CopyUse);
            }
          continue;
        }
      }
    }

    bool Follow = false;
    if (!Pred(*U, Follow))
      return false;
    if (!Follow)
      continue;
    for (const Use &UU : U->getUser()->uses())
      Worklist.push_back(&UU);
  }

  return true;
}
bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                                      const AbstractAttribute &QueryingAA,
                                      bool RequireAllCallSites,
                                      bool &UsedAssumedInformation) {
  // We can try to determine information from the call sites. However, this is
  // only possible if all call sites are known, hence the function must have
  // internal linkage.
  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction) {
    LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
                      << "\n");
    return false;
  }

  return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
                              &QueryingAA, UsedAssumedInformation);
}

bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                                      const Function &Fn,
                                      bool RequireAllCallSites,
                                      const AbstractAttribute *QueryingAA,
                                      bool &UsedAssumedInformation) {
  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "[Attributor] Function " << Fn.getName()
        << " has no internal linkage, hence not all call sites are known\n");
    return false;
  }

  SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use &U = *Uses[u];
    LLVM_DEBUG({
      if (auto *Fn = dyn_cast<Function>(U))
        dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
               << *U.getUser() << "\n";
      else
        dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
               << "\n";
    });
    if (isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
                      /* CheckBBLivenessOnly */ true)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
      continue;
    }
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
      if (CE->isCast() && CE->getType()->isPointerTy()) {
        LLVM_DEBUG(
            dbgs() << "[Attributor] Use, is constant cast expression, add "
                   << CE->getNumUses()
                   << " uses of that expression instead!\n");
        for (const Use &CEU : CE->uses())
          Uses.push_back(&CEU);
        continue;
      }
    }

    AbstractCallSite ACS(&U);
    if (!ACS) {
      LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
                        << " has non call site use " << *U.get() << " in "
                        << *U.getUser() << "\n");
      // BlockAddress users are allowed.
      if (isa<BlockAddress>(U.getUser()))
        continue;
      return false;
    }

    const Use *EffectiveUse =
        ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
    if (!ACS.isCallee(EffectiveUse)) {
      if (!RequireAllCallSites) {
        LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
                          << " is not a call of " << Fn.getName()
                          << ", skip use\n");
        continue;
      }
      LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
                        << " is an invalid use of " << Fn.getName() << "\n");
      return false;
    }

    // Make sure the arguments that can be matched between the call site and
    // the callee agree on their type. It is unlikely they do not and it
    // doesn't make sense for all attributes to know/care about this.
    assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
    unsigned MinArgsParams =
        std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
    for (unsigned u = 0; u < MinArgsParams; ++u) {
      Value *CSArgOp = ACS.getCallArgOperand(u);
      if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
        LLVM_DEBUG(
            dbgs() << "[Attributor] Call site / callee argument type mismatch ["
                   << u << "@" << Fn.getName() << ": "
                   << *Fn.getArg(u)->getType() << " vs. "
                   << *ACS.getCallArgOperand(u)->getType() << "\n");
        return false;
      }
    }

    if (Pred(ACS))
      continue;

    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
                      << *ACS.getInstruction() << "\n");
    return false;
  }

  return true;
}
" 1461 << *ACS.getCallArgOperand(u)->getType() << "\n"); 1462 return false; 1463 } 1464 } 1465 1466 if (Pred(ACS)) 1467 continue; 1468 1469 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for " 1470 << *ACS.getInstruction() << "\n"); 1471 return false; 1472 } 1473 1474 return true; 1475 } 1476 1477 bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) { 1478 // TODO: Maintain a cache of Values that are 1479 // on the pathway from a Argument to a Instruction that would effect the 1480 // liveness/return state etc. 1481 return EnableCallSiteSpecific; 1482 } 1483 1484 bool Attributor::checkForAllReturnedValuesAndReturnInsts( 1485 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred, 1486 const AbstractAttribute &QueryingAA) { 1487 1488 const IRPosition &IRP = QueryingAA.getIRPosition(); 1489 // Since we need to provide return instructions we have to have an exact 1490 // definition. 1491 const Function *AssociatedFunction = IRP.getAssociatedFunction(); 1492 if (!AssociatedFunction) 1493 return false; 1494 1495 // If this is a call site query we use the call site specific return values 1496 // and liveness information. 1497 // TODO: use the function scope once we have call site AAReturnedValues. 1498 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction); 1499 const auto &AARetVal = 1500 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED); 1501 if (!AARetVal.getState().isValidState()) 1502 return false; 1503 1504 return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred); 1505 } 1506 1507 bool Attributor::checkForAllReturnedValues( 1508 function_ref<bool(Value &)> Pred, const AbstractAttribute &QueryingAA) { 1509 1510 const IRPosition &IRP = QueryingAA.getIRPosition(); 1511 const Function *AssociatedFunction = IRP.getAssociatedFunction(); 1512 if (!AssociatedFunction) 1513 return false; 1514 1515 // TODO: use the function scope once we have call site AAReturnedValues. 1516 const IRPosition &QueryIRP = IRPosition::function( 1517 *AssociatedFunction, QueryingAA.getCallBaseContext()); 1518 const auto &AARetVal = 1519 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED); 1520 if (!AARetVal.getState().isValidState()) 1521 return false; 1522 1523 return AARetVal.checkForAllReturnedValuesAndReturnInsts( 1524 [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) { 1525 return Pred(RV); 1526 }); 1527 } 1528 1529 static bool checkForAllInstructionsImpl( 1530 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap, 1531 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA, 1532 const AAIsDead *LivenessAA, const ArrayRef<unsigned> &Opcodes, 1533 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false, 1534 bool CheckPotentiallyDead = false) { 1535 for (unsigned Opcode : Opcodes) { 1536 // Check if we have instructions with this opcode at all first. 1537 auto *Insts = OpcodeInstMap.lookup(Opcode); 1538 if (!Insts) 1539 continue; 1540 1541 for (Instruction *I : *Insts) { 1542 // Skip dead instructions. 
1543 if (A && !CheckPotentiallyDead && 1544 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA, 1545 UsedAssumedInformation, CheckBBLivenessOnly)) { 1546 LLVM_DEBUG(dbgs() << "[Attributor] Instruction " << *I 1547 << " is potentially dead, skip!\n";); 1548 continue; 1549 } 1550 1551 if (!Pred(*I)) 1552 return false; 1553 } 1554 } 1555 return true; 1556 } 1557 1558 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred, 1559 const Function *Fn, 1560 const AbstractAttribute &QueryingAA, 1561 const ArrayRef<unsigned> &Opcodes, 1562 bool &UsedAssumedInformation, 1563 bool CheckBBLivenessOnly, 1564 bool CheckPotentiallyDead) { 1565 // Since we need to provide instructions we have to have an exact definition. 1566 if (!Fn || Fn->isDeclaration()) 1567 return false; 1568 1569 // TODO: use the function scope once we have call site AAReturnedValues. 1570 const IRPosition &QueryIRP = IRPosition::function(*Fn); 1571 const auto *LivenessAA = 1572 (CheckBBLivenessOnly || CheckPotentiallyDead) 1573 ? nullptr 1574 : &(getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE)); 1575 1576 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn); 1577 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA, 1578 LivenessAA, Opcodes, UsedAssumedInformation, 1579 CheckBBLivenessOnly, CheckPotentiallyDead)) 1580 return false; 1581 1582 return true; 1583 } 1584 1585 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred, 1586 const AbstractAttribute &QueryingAA, 1587 const ArrayRef<unsigned> &Opcodes, 1588 bool &UsedAssumedInformation, 1589 bool CheckBBLivenessOnly, 1590 bool CheckPotentiallyDead) { 1591 const IRPosition &IRP = QueryingAA.getIRPosition(); 1592 const Function *AssociatedFunction = IRP.getAssociatedFunction(); 1593 return checkForAllInstructions(Pred, AssociatedFunction, QueryingAA, Opcodes, 1594 UsedAssumedInformation, CheckBBLivenessOnly, 1595 CheckPotentiallyDead); 1596 } 1597 1598 bool Attributor::checkForAllReadWriteInstructions( 1599 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA, 1600 bool &UsedAssumedInformation) { 1601 1602 const Function *AssociatedFunction = 1603 QueryingAA.getIRPosition().getAssociatedFunction(); 1604 if (!AssociatedFunction) 1605 return false; 1606 1607 // TODO: use the function scope once we have call site AAReturnedValues. 1608 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction); 1609 const auto &LivenessAA = 1610 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE); 1611 1612 for (Instruction *I : 1613 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) { 1614 // Skip dead instructions. 1615 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, &LivenessAA, 1616 UsedAssumedInformation)) 1617 continue; 1618 1619 if (!Pred(*I)) 1620 return false; 1621 } 1622 1623 return true; 1624 } 1625 1626 void Attributor::runTillFixpoint() { 1627 TimeTraceScope TimeScope("Attributor::runTillFixpoint"); 1628 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized " 1629 << DG.SyntheticRoot.Deps.size() 1630 << " abstract attributes.\n"); 1631 1632 // Now that all abstract attributes are collected and initialized we start 1633 // the abstract analysis. 
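  //
  // The loop below is, in essence, a classic worklist fixpoint iteration; a
  // rough sketch of the scheme (simplified, not the actual code):
  //
  //   Worklist = all abstract attributes
  //   while (!Worklist.empty() && Iteration++ < MaxIterations) {
  //     Changed  = {AA in Worklist : update(AA) == CHANGED}
  //     Worklist = Changed + all AAs that depend on an AA in Changed
  //   }
  //
  // Invalid AAs are additionally fast-tracked: AAs with a required dependence
  // on an invalid one are pessimistically fixed without running their updates
  // one by one.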
1634 
1635   unsigned IterationCounter = 1;
1636   unsigned MaxIterations =
1637       Configuration.MaxFixpointIterations.getValueOr(SetFixpointIterations);
1638 
1639   SmallVector<AbstractAttribute *, 32> ChangedAAs;
1640   SetVector<AbstractAttribute *> Worklist, InvalidAAs;
1641   Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
1642 
1643   do {
1644     // Remember the size to determine new attributes.
1645     size_t NumAAs = DG.SyntheticRoot.Deps.size();
1646     LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
1647                       << ", Worklist size: " << Worklist.size() << "\n");
1648 
1649     // For invalid AAs we can fix dependent AAs that have a required dependence,
1650     // thereby folding long dependence chains in a single step without the need
1651     // to run updates.
1652     for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
1653       AbstractAttribute *InvalidAA = InvalidAAs[u];
1654 
1655       // Check the dependences to fast track invalidation.
1656       LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
1657                         << InvalidAA->Deps.size()
1658                         << " required & optional dependences\n");
1659       while (!InvalidAA->Deps.empty()) {
1660         const auto &Dep = InvalidAA->Deps.back();
1661         InvalidAA->Deps.pop_back();
1662         AbstractAttribute *DepAA = cast<AbstractAttribute>(Dep.getPointer());
1663         if (Dep.getInt() == unsigned(DepClassTy::OPTIONAL)) {
1664           LLVM_DEBUG(dbgs() << " - recompute: " << *DepAA);
1665           Worklist.insert(DepAA);
1666           continue;
1667         }
1668         LLVM_DEBUG(dbgs() << " - invalidate: " << *DepAA);
1669         DepAA->getState().indicatePessimisticFixpoint();
1670         assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
1671         if (!DepAA->getState().isValidState())
1672           InvalidAAs.insert(DepAA);
1673         else
1674           ChangedAAs.push_back(DepAA);
1675       }
1676     }
1677 
1678     // Add all abstract attributes that are potentially dependent on one that
1679     // changed to the work list.
1680     for (AbstractAttribute *ChangedAA : ChangedAAs)
1681       while (!ChangedAA->Deps.empty()) {
1682         Worklist.insert(
1683             cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
1684         ChangedAA->Deps.pop_back();
1685       }
1686 
1687     LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
1688                       << ", Worklist+Dependent size: " << Worklist.size()
1689                       << "\n");
1690 
1691     // Reset the changed and invalid set.
1692     ChangedAAs.clear();
1693     InvalidAAs.clear();
1694 
1695     // Update all abstract attributes in the work list and record the ones that
1696     // changed.
1697     for (AbstractAttribute *AA : Worklist) {
1698       const auto &AAState = AA->getState();
1699       if (!AAState.isAtFixpoint())
1700         if (updateAA(*AA) == ChangeStatus::CHANGED)
1701           ChangedAAs.push_back(AA);
1702 
1703       // Use the InvalidAAs vector to quickly propagate invalid states
1704       // transitively, without requiring updates.
1705       if (!AAState.isValidState())
1706         InvalidAAs.insert(AA);
1707     }
1708 
1709     // Add attributes to the changed set if they have been created in the last
1710     // iteration.
1711     ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
1712                       DG.SyntheticRoot.end());
1713 
1714     // Reset the work list and repopulate with the changed abstract attributes.
1715     // Note that dependent ones are added above.
1716     Worklist.clear();
1717     Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
1718     Worklist.insert(QueryAAsAwaitingUpdate.begin(),
1719                     QueryAAsAwaitingUpdate.end());
1720     QueryAAsAwaitingUpdate.clear();
1721 
1722   } while (!Worklist.empty() &&
1723            (IterationCounter++ < MaxIterations || VerifyMaxFixpointIterations));
1724 
1725   if (IterationCounter > MaxIterations && !Functions.empty()) {
1726     auto Remark = [&](OptimizationRemarkMissed ORM) {
1727       return ORM << "Attributor did not reach a fixpoint after "
1728                  << ore::NV("Iterations", MaxIterations) << " iterations.";
1729     };
1730     Function *F = Functions.front();
1731     emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
1732   }
1733 
1734   LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
1735                     << IterationCounter << "/" << MaxIterations
1736                     << " iterations\n");
1737 
1738   // Reset abstract attributes not settled in a sound fixpoint by now. This
1739   // happens when we stopped the fixpoint iteration early. Note that only the
1740   // ones marked as "changed" *and* the ones transitively depending on them
1741   // need to be reverted to a pessimistic state. Others might not be in a
1742   // fixpoint state but we can use the optimistic results for them anyway.
1743   SmallPtrSet<AbstractAttribute *, 32> Visited;
1744   for (unsigned u = 0; u < ChangedAAs.size(); u++) {
1745     AbstractAttribute *ChangedAA = ChangedAAs[u];
1746     if (!Visited.insert(ChangedAA).second)
1747       continue;
1748 
1749     AbstractState &State = ChangedAA->getState();
1750     if (!State.isAtFixpoint()) {
1751       State.indicatePessimisticFixpoint();
1752 
1753       NumAttributesTimedOut++;
1754     }
1755 
1756     while (!ChangedAA->Deps.empty()) {
1757       ChangedAAs.push_back(
1758           cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
1759       ChangedAA->Deps.pop_back();
1760     }
1761   }
1762 
1763   LLVM_DEBUG({
1764     if (!Visited.empty())
1765       dbgs() << "\n[Attributor] Finalized " << Visited.size()
1766              << " abstract attributes.\n";
1767   });
1768 
1769   if (VerifyMaxFixpointIterations && IterationCounter != MaxIterations) {
1770     errs() << "\n[Attributor] Fixpoint iteration done after: "
1771            << IterationCounter << "/" << MaxIterations << " iterations\n";
1772     llvm_unreachable("The fixpoint was not reached with exactly the number of "
1773                      "specified iterations!");
1774   }
1775 }
1776 
1777 void Attributor::registerForUpdate(AbstractAttribute &AA) {
1778   assert(AA.isQueryAA() &&
1779          "Non-query AAs should not be required to register for updates!");
1780   QueryAAsAwaitingUpdate.insert(&AA);
1781 }
1782 
1783 ChangeStatus Attributor::manifestAttributes() {
1784   TimeTraceScope TimeScope("Attributor::manifestAttributes");
1785   size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
1786 
1787   unsigned NumManifested = 0;
1788   unsigned NumAtFixpoint = 0;
1789   ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
1790   for (auto &DepAA : DG.SyntheticRoot.Deps) {
1791     AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
1792     AbstractState &State = AA->getState();
1793 
1794     // If there is not already a fixpoint reached, we can now take the
1795     // optimistic state. This is correct because we enforced a pessimistic one
1796     // on abstract attributes that were transitively dependent on a changed one
1797     // already above.
1798     if (!State.isAtFixpoint())
1799       State.indicateOptimisticFixpoint();
1800 
1801     // We must not manifest Attributes that use CallBase info.
1802     if (AA->hasCallBaseContext())
1803       continue;
1804     // If the state is invalid, we do not try to manifest it.
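    // (An invalid state provides no information, not even a pessimistic one,
    // that we could soundly encode in the IR.)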
1805     if (!State.isValidState())
1806       continue;
1807 
1808     if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
1809       continue;
1810 
1811     // Skip dead code.
1812     bool UsedAssumedInformation = false;
1813     if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
1814                       /* CheckBBLivenessOnly */ true))
1815       continue;
1816     // Check the manifest debug counter that allows skipping manifestation of
1817     // AAs.
1818     if (!DebugCounter::shouldExecute(ManifestDBGCounter))
1819       continue;
1820     // Manifest the state and record if we changed the IR.
1821     ChangeStatus LocalChange = AA->manifest(*this);
1822     if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
1823       AA->trackStatistics();
1824     LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
1825                       << "\n");
1826 
1827     ManifestChange = ManifestChange | LocalChange;
1828 
1829     NumAtFixpoint++;
1830     NumManifested += (LocalChange == ChangeStatus::CHANGED);
1831   }
1832 
1833   (void)NumManifested;
1834   (void)NumAtFixpoint;
1835   LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
1836                     << " arguments while " << NumAtFixpoint
1837                     << " were in a valid fixpoint state\n");
1838 
1839   NumAttributesManifested += NumManifested;
1840   NumAttributesValidFixpoint += NumAtFixpoint;
1841 
1842   (void)NumFinalAAs;
1843   if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
1844     for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size(); ++u)
1845       errs() << "Unexpected abstract attribute: "
1846              << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
1847              << " :: "
1848              << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
1849                     ->getIRPosition()
1850                     .getAssociatedValue()
1851              << "\n";
1852     llvm_unreachable("Expected the final number of abstract attributes to "
1853                      "remain unchanged!");
1854   }
1855   return ManifestChange;
1856 }
1857 
1858 void Attributor::identifyDeadInternalFunctions() {
1859   // Early exit if we don't intend to delete functions.
1860   if (!Configuration.DeleteFns)
1861     return;
1862 
1863   // Identify dead internal functions and delete them. This happens outside
1864   // the other fixpoint analysis as we might treat potentially dead functions
1865   // as live to lower the number of iterations. If they happen to be dead, the
1866   // below fixpoint loop will identify and eliminate them.
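  //
  // For illustration (hypothetical IR), two internal functions that only ever
  // call each other are both identified as dead here:
  //
  //   define internal void @a() {
  //     call void @b()
  //     ret void
  //   }
  //   define internal void @b() {
  //     call void @a()
  //     ret void
  //   }
  //
  // Neither is ever marked live by the loop below, so both end up in
  // ToBeDeletedFunctions.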
1867   SmallVector<Function *, 8> InternalFns;
1868   for (Function *F : Functions)
1869     if (F->hasLocalLinkage())
1870       InternalFns.push_back(F);
1871 
1872   SmallPtrSet<Function *, 8> LiveInternalFns;
1873   bool FoundLiveInternal = true;
1874   while (FoundLiveInternal) {
1875     FoundLiveInternal = false;
1876     for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
1877       Function *F = InternalFns[u];
1878       if (!F)
1879         continue;
1880 
1881       bool UsedAssumedInformation = false;
1882       if (checkForAllCallSites(
1883               [&](AbstractCallSite ACS) {
1884                 Function *Callee = ACS.getInstruction()->getFunction();
1885                 return ToBeDeletedFunctions.count(Callee) ||
1886                        (Functions.count(Callee) && Callee->hasLocalLinkage() &&
1887                         !LiveInternalFns.count(Callee));
1888               },
1889               *F, true, nullptr, UsedAssumedInformation)) {
1890         continue;
1891       }
1892 
1893       LiveInternalFns.insert(F);
1894       InternalFns[u] = nullptr;
1895       FoundLiveInternal = true;
1896     }
1897   }
1898 
1899   for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
1900     if (Function *F = InternalFns[u])
1901       ToBeDeletedFunctions.insert(F);
1902 }
1903 
1904 ChangeStatus Attributor::cleanupIR() {
1905   TimeTraceScope TimeScope("Attributor::cleanupIR");
1906   // Delete stuff at the end to avoid invalid references and to get a nice order.
1907   LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
1908                     << ToBeDeletedFunctions.size() << " functions and "
1909                     << ToBeDeletedBlocks.size() << " blocks and "
1910                     << ToBeDeletedInsts.size() << " instructions and "
1911                     << ToBeChangedValues.size() << " values and "
1912                     << ToBeChangedUses.size() << " uses. "
1913                     << "Preserve manifest added " << ManifestAddedBlocks.size()
1914                     << " blocks\n");
1915 
1916   SmallVector<WeakTrackingVH, 32> DeadInsts;
1917   SmallVector<Instruction *, 32> TerminatorsToFold;
1918 
1919   auto ReplaceUse = [&](Use *U, Value *NewV) {
1920     Value *OldV = U->get();
1921 
1922     // If we plan to replace NewV we need to update it at this point.
1923     do {
1924       const auto &Entry = ToBeChangedValues.lookup(NewV);
1925       if (!Entry.first)
1926         break;
1927       NewV = Entry.first;
1928     } while (true);
1929 
1930     Instruction *I = dyn_cast<Instruction>(U->getUser());
1931     assert((!I || isRunOn(*I->getFunction())) &&
1932            "Cannot replace an invoke outside the current SCC!");
1933 
1934     // Do not replace uses in returns if the value is a must-tail call we will
1935     // not delete.
1936     if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
1937       if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
1938         if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
1939           return;
1940       // If we rewrite a return and the new value is not an argument, strip the
1941       // `returned` attribute as it is wrong now.
1942       if (!isa<Argument>(NewV))
1943         for (auto &Arg : RI->getFunction()->args())
1944           Arg.removeAttr(Attribute::Returned);
1945     }
1946 
1947     // Do not perform call graph altering changes outside the SCC.
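    // (Replacing the callee operand of a call site would add or remove a call
    // graph edge, which the SCC-local call graph updater cannot handle here.)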
1948 if (auto *CB = dyn_cast_or_null<CallBase>(I)) 1949 if (CB->isCallee(U)) 1950 return; 1951 1952 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser() 1953 << " instead of " << *OldV << "\n"); 1954 U->set(NewV); 1955 1956 if (Instruction *I = dyn_cast<Instruction>(OldV)) { 1957 CGModifiedFunctions.insert(I->getFunction()); 1958 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) && 1959 isInstructionTriviallyDead(I)) 1960 DeadInsts.push_back(I); 1961 } 1962 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) { 1963 auto *CB = cast<CallBase>(U->getUser()); 1964 if (CB->isArgOperand(U)) { 1965 unsigned Idx = CB->getArgOperandNo(U); 1966 CB->removeParamAttr(Idx, Attribute::NoUndef); 1967 Function *Fn = CB->getCalledFunction(); 1968 if (Fn && Fn->arg_size() > Idx) 1969 Fn->removeParamAttr(Idx, Attribute::NoUndef); 1970 } 1971 } 1972 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) { 1973 Instruction *UserI = cast<Instruction>(U->getUser()); 1974 if (isa<UndefValue>(NewV)) { 1975 ToBeChangedToUnreachableInsts.insert(UserI); 1976 } else { 1977 TerminatorsToFold.push_back(UserI); 1978 } 1979 } 1980 }; 1981 1982 for (auto &It : ToBeChangedUses) { 1983 Use *U = It.first; 1984 Value *NewV = It.second; 1985 ReplaceUse(U, NewV); 1986 } 1987 1988 SmallVector<Use *, 4> Uses; 1989 for (auto &It : ToBeChangedValues) { 1990 Value *OldV = It.first; 1991 auto &Entry = It.second; 1992 Value *NewV = Entry.first; 1993 Uses.clear(); 1994 for (auto &U : OldV->uses()) 1995 if (Entry.second || !U.getUser()->isDroppable()) 1996 Uses.push_back(&U); 1997 for (Use *U : Uses) 1998 ReplaceUse(U, NewV); 1999 } 2000 2001 for (auto &V : InvokeWithDeadSuccessor) 2002 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) { 2003 assert(isRunOn(*II->getFunction()) && 2004 "Cannot replace an invoke outside the current SCC!"); 2005 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind); 2006 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn); 2007 bool Invoke2CallAllowed = 2008 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction()); 2009 assert((UnwindBBIsDead || NormalBBIsDead) && 2010 "Invoke does not have dead successors!"); 2011 BasicBlock *BB = II->getParent(); 2012 BasicBlock *NormalDestBB = II->getNormalDest(); 2013 if (UnwindBBIsDead) { 2014 Instruction *NormalNextIP = &NormalDestBB->front(); 2015 if (Invoke2CallAllowed) { 2016 changeToCall(II); 2017 NormalNextIP = BB->getTerminator(); 2018 } 2019 if (NormalBBIsDead) 2020 ToBeChangedToUnreachableInsts.insert(NormalNextIP); 2021 } else { 2022 assert(NormalBBIsDead && "Broken invariant!"); 2023 if (!NormalDestBB->getUniquePredecessor()) 2024 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead"); 2025 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front()); 2026 } 2027 } 2028 for (Instruction *I : TerminatorsToFold) { 2029 assert(isRunOn(*I->getFunction()) && 2030 "Cannot replace a terminator outside the current SCC!"); 2031 CGModifiedFunctions.insert(I->getFunction()); 2032 ConstantFoldTerminator(I->getParent()); 2033 } 2034 for (auto &V : ToBeChangedToUnreachableInsts) 2035 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) { 2036 assert(isRunOn(*I->getFunction()) && 2037 "Cannot replace an instruction outside the current SCC!"); 2038 CGModifiedFunctions.insert(I->getFunction()); 2039 changeToUnreachable(I); 2040 } 2041 2042 for (auto &V : ToBeDeletedInsts) { 2043 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) { 2044 if (auto *CB = dyn_cast<CallBase>(I)) { 2045 assert(isRunOn(*I->getFunction()) && 2046 
"Cannot delete an instruction outside the current SCC!"); 2047 if (!isa<IntrinsicInst>(CB)) 2048 Configuration.CGUpdater.removeCallSite(*CB); 2049 } 2050 I->dropDroppableUses(); 2051 CGModifiedFunctions.insert(I->getFunction()); 2052 if (!I->getType()->isVoidTy()) 2053 I->replaceAllUsesWith(UndefValue::get(I->getType())); 2054 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I)) 2055 DeadInsts.push_back(I); 2056 else 2057 I->eraseFromParent(); 2058 } 2059 } 2060 2061 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; }); 2062 2063 LLVM_DEBUG({ 2064 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n"; 2065 for (auto &I : DeadInsts) 2066 if (I) 2067 dbgs() << " - " << *I << "\n"; 2068 }); 2069 2070 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts); 2071 2072 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) { 2073 SmallVector<BasicBlock *, 8> ToBeDeletedBBs; 2074 ToBeDeletedBBs.reserve(NumDeadBlocks); 2075 for (BasicBlock *BB : ToBeDeletedBlocks) { 2076 assert(isRunOn(*BB->getParent()) && 2077 "Cannot delete a block outside the current SCC!"); 2078 CGModifiedFunctions.insert(BB->getParent()); 2079 // Do not delete BBs added during manifests of AAs. 2080 if (ManifestAddedBlocks.contains(BB)) 2081 continue; 2082 ToBeDeletedBBs.push_back(BB); 2083 } 2084 // Actually we do not delete the blocks but squash them into a single 2085 // unreachable but untangling branches that jump here is something we need 2086 // to do in a more generic way. 2087 detachDeadBlocks(ToBeDeletedBBs, nullptr); 2088 } 2089 2090 identifyDeadInternalFunctions(); 2091 2092 // Rewrite the functions as requested during manifest. 2093 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions); 2094 2095 for (Function *Fn : CGModifiedFunctions) 2096 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn)) 2097 Configuration.CGUpdater.reanalyzeFunction(*Fn); 2098 2099 for (Function *Fn : ToBeDeletedFunctions) { 2100 if (!Functions.count(Fn)) 2101 continue; 2102 Configuration.CGUpdater.removeFunction(*Fn); 2103 } 2104 2105 if (!ToBeChangedUses.empty()) 2106 ManifestChange = ChangeStatus::CHANGED; 2107 2108 if (!ToBeChangedToUnreachableInsts.empty()) 2109 ManifestChange = ChangeStatus::CHANGED; 2110 2111 if (!ToBeDeletedFunctions.empty()) 2112 ManifestChange = ChangeStatus::CHANGED; 2113 2114 if (!ToBeDeletedBlocks.empty()) 2115 ManifestChange = ChangeStatus::CHANGED; 2116 2117 if (!ToBeDeletedInsts.empty()) 2118 ManifestChange = ChangeStatus::CHANGED; 2119 2120 if (!InvokeWithDeadSuccessor.empty()) 2121 ManifestChange = ChangeStatus::CHANGED; 2122 2123 if (!DeadInsts.empty()) 2124 ManifestChange = ChangeStatus::CHANGED; 2125 2126 NumFnDeleted += ToBeDeletedFunctions.size(); 2127 2128 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size() 2129 << " functions after manifest.\n"); 2130 2131 #ifdef EXPENSIVE_CHECKS 2132 for (Function *F : Functions) { 2133 if (ToBeDeletedFunctions.count(F)) 2134 continue; 2135 assert(!verifyFunction(*F, &errs()) && "Module verification failed!"); 2136 } 2137 #endif 2138 2139 return ManifestChange; 2140 } 2141 2142 ChangeStatus Attributor::run() { 2143 TimeTraceScope TimeScope("Attributor::run"); 2144 AttributorCallGraph ACallGraph(*this); 2145 2146 if (PrintCallGraph) 2147 ACallGraph.populateAll(); 2148 2149 Phase = AttributorPhase::UPDATE; 2150 runTillFixpoint(); 2151 2152 // dump graphs on demand 2153 if (DumpDepGraph) 2154 DG.dumpGraph(); 2155 2156 if (ViewDepGraph) 2157 DG.viewGraph(); 2158 2159 if 
2160     DG.print();
2161 
2162   Phase = AttributorPhase::MANIFEST;
2163   ChangeStatus ManifestChange = manifestAttributes();
2164 
2165   Phase = AttributorPhase::CLEANUP;
2166   ChangeStatus CleanupChange = cleanupIR();
2167 
2168   if (PrintCallGraph)
2169     ACallGraph.print();
2170 
2171   return ManifestChange | CleanupChange;
2172 }
2173 
2174 ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2175   TimeTraceScope TimeScope(
2176       AA.getName() + std::to_string(AA.getIRPosition().getPositionKind()) +
2177       "::updateAA");
2178   assert(Phase == AttributorPhase::UPDATE &&
2179          "We can update AA only in the update stage!");
2180 
2181   // Use a new dependence vector for this update.
2182   DependenceVector DV;
2183   DependenceStack.push_back(&DV);
2184 
2185   auto &AAState = AA.getState();
2186   ChangeStatus CS = ChangeStatus::UNCHANGED;
2187   bool UsedAssumedInformation = false;
2188   if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2189                      /* CheckBBLivenessOnly */ true))
2190     CS = AA.update(*this);
2191 
2192   if (!AA.isQueryAA() && DV.empty()) {
2193     // If the attribute did not query any non-fix information, the state
2194     // will not change and we can indicate that right away.
2195     AAState.indicateOptimisticFixpoint();
2196   }
2197 
2198   if (!AAState.isAtFixpoint())
2199     rememberDependences();
2200 
2201   // Verify the stack was used properly, that is, we pop the dependence vector
2202   // we put there earlier.
2203   DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2204   (void)PoppedDV;
2205   assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2206 
2207   return CS;
2208 }
2209 
2210 void Attributor::createShallowWrapper(Function &F) {
2211   assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2212 
2213   Module &M = *F.getParent();
2214   LLVMContext &Ctx = M.getContext();
2215   FunctionType *FnTy = F.getFunctionType();
2216 
2217   Function *Wrapper =
2218       Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2219   F.setName(""); // make the inner function anonymous
2220   M.getFunctionList().insert(F.getIterator(), Wrapper);
2221 
2222   F.setLinkage(GlobalValue::InternalLinkage);
2223 
2224   F.replaceAllUsesWith(Wrapper);
2225   assert(F.use_empty() && "Uses remained after wrapper was created!");
2226 
2227   // Move the COMDAT section to the wrapper.
2228   // TODO: Check if we need to keep it for F as well.
2229   Wrapper->setComdat(F.getComdat());
2230   F.setComdat(nullptr);
2231 
2232   // Copy all metadata and attributes but keep them on F as well.
2233   SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2234   F.getAllMetadata(MDs);
2235   for (auto MDIt : MDs)
2236     Wrapper->addMetadata(MDIt.first, *MDIt.second);
2237   Wrapper->setAttributes(F.getAttributes());
2238 
2239   // Create the call in the wrapper.
2240   BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2241 
2242   SmallVector<Value *, 8> Args;
2243   Argument *FArgIt = F.arg_begin();
2244   for (Argument &Arg : Wrapper->args()) {
2245     Args.push_back(&Arg);
2246     Arg.setName((FArgIt++)->getName());
2247   }
2248 
2249   CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2250   CI->setTailCall(true);
2251   CI->addFnAttr(Attribute::NoInline);
2252   ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2253 
2254   NumFnShallowWrappersCreated++;
2255 }
2256 
2257 bool Attributor::isInternalizable(Function &F) {
2258   if (F.isDeclaration() || F.hasLocalLinkage() ||
2259       GlobalValue::isInterposableLinkage(F.getLinkage()))
2260     return false;
2261   return true;
2262 }
2263 
2264 Function *Attributor::internalizeFunction(Function &F, bool Force) {
2265   if (!AllowDeepWrapper && !Force)
2266     return nullptr;
2267   if (!isInternalizable(F))
2268     return nullptr;
2269 
2270   SmallPtrSet<Function *, 2> FnSet = {&F};
2271   DenseMap<Function *, Function *> InternalizedFns;
2272   internalizeFunctions(FnSet, InternalizedFns);
2273 
2274   return InternalizedFns[&F];
2275 }
2276 
2277 bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2278                                       DenseMap<Function *, Function *> &FnMap) {
2279   for (Function *F : FnSet)
2280     if (!Attributor::isInternalizable(*F))
2281       return false;
2282 
2283   FnMap.clear();
2284   // Generate the internalized version of each function.
2285   for (Function *F : FnSet) {
2286     Module &M = *F->getParent();
2287     FunctionType *FnTy = F->getFunctionType();
2288 
2289     // Create a copy of the current function.
2290     Function *Copied =
2291         Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2292                          F->getName() + ".internalized");
2293     ValueToValueMapTy VMap;
2294     auto *NewFArgIt = Copied->arg_begin();
2295     for (auto &Arg : F->args()) {
2296       auto ArgName = Arg.getName();
2297       NewFArgIt->setName(ArgName);
2298       VMap[&Arg] = &(*NewFArgIt++);
2299     }
2300     SmallVector<ReturnInst *, 8> Returns;
2301 
2302     // Copy the body of the original function to the new one.
2303     CloneFunctionInto(Copied, F, VMap,
2304                       CloneFunctionChangeType::LocalChangesOnly, Returns);
2305 
2306     // Set the linkage and visibility late as CloneFunctionInto has some
2307     // implicit requirements.
2308     Copied->setVisibility(GlobalValue::DefaultVisibility);
2309     Copied->setLinkage(GlobalValue::PrivateLinkage);
2310 
2311     // Copy metadata.
2312     SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2313     F->getAllMetadata(MDs);
2314     for (auto MDIt : MDs)
2315       if (!Copied->hasMetadata())
2316         Copied->addMetadata(MDIt.first, *MDIt.second);
2317 
2318     M.getFunctionList().insert(F->getIterator(), Copied);
2319     Copied->setDSOLocal(true);
2320     FnMap[F] = Copied;
2321   }
2322 
2323   // Replace all uses of the old function with the new internalized function
2324   // unless the caller is a function that was just internalized.
2325   for (Function *F : FnSet) {
2326     auto &InternalizedFn = FnMap[F];
2327     auto IsNotInternalized = [&](Use &U) -> bool {
2328       if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2329         return !FnMap.lookup(CB->getCaller());
2330       return false;
2331     };
2332     F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2333   }
2334 
2335   return true;
2336 }
2337 
2338 bool Attributor::isValidFunctionSignatureRewrite(
2339     Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2340 
2341   if (!Configuration.RewriteSignatures)
2342     return false;
2343 
2344   Function *Fn = Arg.getParent();
2345   auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2346     // Forbid the call site to cast the function return type. If we need to
2347     // rewrite these functions we need to re-create a cast for the new call site
2348     // (if the old had uses).
2349     if (!ACS.getCalledFunction() ||
2350         ACS.getInstruction()->getType() !=
2351             ACS.getCalledFunction()->getReturnType())
2352       return false;
2353     if (ACS.getCalledOperand()->getType() != Fn->getType())
2354       return false;
2355     // Forbid must-tail calls for now.
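    // A musttail call has to match the signature of its caller, so its callee
    // cannot be rewritten independently; callback calls hide the actual callee
    // use behind a broker and are equally unsupported here.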
2356     return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2357   };
2358 
2359   // Avoid var-arg functions for now.
2360   if (Fn->isVarArg()) {
2361     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2362     return false;
2363   }
2364 
2365   // Avoid functions with complicated argument passing semantics.
2366   AttributeList FnAttributeList = Fn->getAttributes();
2367   if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2368       FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2369       FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2370       FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2371     LLVM_DEBUG(
2372         dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2373     return false;
2374   }
2375 
2376   // Avoid callbacks for now.
2377   bool UsedAssumedInformation = false;
2378   if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2379                             UsedAssumedInformation)) {
2380     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2381     return false;
2382   }
2383 
2384   auto InstPred = [](Instruction &I) {
2385     if (auto *CI = dyn_cast<CallInst>(&I))
2386       return !CI->isMustTailCall();
2387     return true;
2388   };
2389 
2390   // Forbid must-tail calls for now.
2391   // TODO:
2392   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2393   if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2394                                    nullptr, {Instruction::Call},
2395                                    UsedAssumedInformation)) {
2396     LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2397     return false;
2398   }
2399 
2400   return true;
2401 }
2402 
2403 bool Attributor::registerFunctionSignatureRewrite(
2404     Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2405     ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2406     ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2407   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2408                     << Arg.getParent()->getName() << " with "
2409                     << ReplacementTypes.size() << " replacements\n");
2410   assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2411          "Cannot register an invalid rewrite");
2412 
2413   Function *Fn = Arg.getParent();
2414   SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2415       ArgumentReplacementMap[Fn];
2416   if (ARIs.empty())
2417     ARIs.resize(Fn->arg_size());
2418 
2419   // If we already have a replacement with at most as many new arguments,
2420   // ignore this request.
2421   std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2422   if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2423     LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2424     return false;
2425   }
2426 
2427   // If we have a replacement already but we like the new one better, delete
2428   // the old.
2429   ARI.reset();
2430 
2431   LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2432                     << Arg.getParent()->getName() << " with "
2433                     << ReplacementTypes.size() << " replacements\n");
2434 
2435   // Remember the replacement.
2436 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes, 2437 std::move(CalleeRepairCB), 2438 std::move(ACSRepairCB))); 2439 2440 return true; 2441 } 2442 2443 bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) { 2444 bool Result = true; 2445 #ifndef NDEBUG 2446 if (SeedAllowList.size() != 0) 2447 Result = llvm::is_contained(SeedAllowList, AA.getName()); 2448 Function *Fn = AA.getAnchorScope(); 2449 if (FunctionSeedAllowList.size() != 0 && Fn) 2450 Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName()); 2451 #endif 2452 return Result; 2453 } 2454 2455 ChangeStatus Attributor::rewriteFunctionSignatures( 2456 SmallSetVector<Function *, 8> &ModifiedFns) { 2457 ChangeStatus Changed = ChangeStatus::UNCHANGED; 2458 2459 for (auto &It : ArgumentReplacementMap) { 2460 Function *OldFn = It.getFirst(); 2461 2462 // Deleted functions do not require rewrites. 2463 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn)) 2464 continue; 2465 2466 const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs = 2467 It.getSecond(); 2468 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!"); 2469 2470 SmallVector<Type *, 16> NewArgumentTypes; 2471 SmallVector<AttributeSet, 16> NewArgumentAttributes; 2472 2473 // Collect replacement argument types and copy over existing attributes. 2474 AttributeList OldFnAttributeList = OldFn->getAttributes(); 2475 for (Argument &Arg : OldFn->args()) { 2476 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI = 2477 ARIs[Arg.getArgNo()]) { 2478 NewArgumentTypes.append(ARI->ReplacementTypes.begin(), 2479 ARI->ReplacementTypes.end()); 2480 NewArgumentAttributes.append(ARI->getNumReplacementArgs(), 2481 AttributeSet()); 2482 } else { 2483 NewArgumentTypes.push_back(Arg.getType()); 2484 NewArgumentAttributes.push_back( 2485 OldFnAttributeList.getParamAttrs(Arg.getArgNo())); 2486 } 2487 } 2488 2489 FunctionType *OldFnTy = OldFn->getFunctionType(); 2490 Type *RetTy = OldFnTy->getReturnType(); 2491 2492 // Construct the new function type using the new arguments types. 2493 FunctionType *NewFnTy = 2494 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg()); 2495 2496 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName() 2497 << "' from " << *OldFn->getFunctionType() << " to " 2498 << *NewFnTy << "\n"); 2499 2500 // Create the new function body and insert it into the module. 2501 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(), 2502 OldFn->getAddressSpace(), ""); 2503 Functions.insert(NewFn); 2504 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn); 2505 NewFn->takeName(OldFn); 2506 NewFn->copyAttributesFrom(OldFn); 2507 2508 // Patch the pointer to LLVM function in debug info descriptor. 2509 NewFn->setSubprogram(OldFn->getSubprogram()); 2510 OldFn->setSubprogram(nullptr); 2511 2512 // Recompute the parameter attributes list based on the new arguments for 2513 // the function. 2514 LLVMContext &Ctx = OldFn->getContext(); 2515 NewFn->setAttributes(AttributeList::get( 2516 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(), 2517 NewArgumentAttributes)); 2518 2519 // Since we have now created the new function, splice the body of the old 2520 // function right into the new function, leaving the old rotting hulk of the 2521 // function empty. 2522 NewFn->getBasicBlockList().splice(NewFn->begin(), 2523 OldFn->getBasicBlockList()); 2524 2525 // Fixup block addresses to reference new function. 
2526     SmallVector<BlockAddress *, 8u> BlockAddresses;
2527     for (User *U : OldFn->users())
2528       if (auto *BA = dyn_cast<BlockAddress>(U))
2529         BlockAddresses.push_back(BA);
2530     for (auto *BA : BlockAddresses)
2531       BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
2532 
2533     // Set of all "call-like" instructions that invoke the old function mapped
2534     // to their new replacements.
2535     SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
2536 
2537     // Callback to create a new "call-like" instruction for a given one.
2538     auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
2539       CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
2540       const AttributeList &OldCallAttributeList = OldCB->getAttributes();
2541 
2542       // Collect the new argument operands for the replacement call site.
2543       SmallVector<Value *, 16> NewArgOperands;
2544       SmallVector<AttributeSet, 16> NewArgOperandAttributes;
2545       for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
2546         unsigned NewFirstArgNum = NewArgOperands.size();
2547         (void)NewFirstArgNum; // only used inside assert.
2548         if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2549                 ARIs[OldArgNum]) {
2550           if (ARI->ACSRepairCB)
2551             ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
2552           assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
2553                      NewArgOperands.size() &&
2554                  "ACS repair callback did not provide as many operands as new "
2555                  "types were registered!");
2556           // TODO: Expose the attribute set to the ACS repair callback
2557           NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
2558                                          AttributeSet());
2559         } else {
2560           NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
2561           NewArgOperandAttributes.push_back(
2562               OldCallAttributeList.getParamAttrs(OldArgNum));
2563         }
2564       }
2565 
2566       assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
2567              "Mismatch # argument operands vs. # argument operand attributes!");
2568       assert(NewArgOperands.size() == NewFn->arg_size() &&
2569              "Mismatch # argument operands vs. # function arguments!");
2570 
2571       SmallVector<OperandBundleDef, 4> OperandBundleDefs;
2572       OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
2573 
2574       // Create a new call or invoke instruction to replace the old one.
2575       CallBase *NewCB;
2576       if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
2577         NewCB =
2578             InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
2579                                NewArgOperands, OperandBundleDefs, "", OldCB);
2580       } else {
2581         auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
2582                                        "", OldCB);
2583         NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
2584         NewCB = NewCI;
2585       }
2586 
2587       // Copy over various properties and the new attributes.
2588       NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
2589       NewCB->setCallingConv(OldCB->getCallingConv());
2590       NewCB->takeName(OldCB);
2591       NewCB->setAttributes(AttributeList::get(
2592           Ctx, OldCallAttributeList.getFnAttrs(),
2593           OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
2594 
2595       CallSitePairs.push_back({OldCB, NewCB});
2596       return true;
2597     };
2598 
2599     // Use the CallSiteReplacementCreator to create replacement call sites.
2600     bool UsedAssumedInformation = false;
2601     bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
2602                                         true, nullptr, UsedAssumedInformation);
2603     (void)Success;
2604     assert(Success && "Assumed call site replacement to succeed!");
2605 
2606     // Rewire the arguments.
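    // For illustration: if an argument "i64 %x" was registered for expansion
    // into two i32 pieces, the callee repair callback below is expected to
    // reconstruct %x from the two fresh arguments, while an argument with an
    // empty replacement type list simply disappears and its uses are replaced
    // by poison.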
2607     Argument *OldFnArgIt = OldFn->arg_begin();
2608     Argument *NewFnArgIt = NewFn->arg_begin();
2609     for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
2610          ++OldArgNum, ++OldFnArgIt) {
2611       if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2612               ARIs[OldArgNum]) {
2613         if (ARI->CalleeRepairCB)
2614           ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
2615         if (ARI->ReplacementTypes.empty())
2616           OldFnArgIt->replaceAllUsesWith(
2617               PoisonValue::get(OldFnArgIt->getType()));
2618         NewFnArgIt += ARI->ReplacementTypes.size();
2619       } else {
2620         NewFnArgIt->takeName(&*OldFnArgIt);
2621         OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
2622         ++NewFnArgIt;
2623       }
2624     }
2625 
2626     // Eliminate the instructions *after* we visited all of them.
2627     for (auto &CallSitePair : CallSitePairs) {
2628       CallBase &OldCB = *CallSitePair.first;
2629       CallBase &NewCB = *CallSitePair.second;
2630       assert(OldCB.getType() == NewCB.getType() &&
2631              "Cannot handle call sites with different types!");
2632       ModifiedFns.insert(OldCB.getFunction());
2633       Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
2634       OldCB.replaceAllUsesWith(&NewCB);
2635       OldCB.eraseFromParent();
2636     }
2637 
2638     // Replace the function in the call graph (if any).
2639     Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
2640 
2641     // If the old function was modified and needed to be reanalyzed, the new one
2642     // does now.
2643     if (ModifiedFns.remove(OldFn))
2644       ModifiedFns.insert(NewFn);
2645 
2646     Changed = ChangeStatus::CHANGED;
2647   }
2648 
2649   return Changed;
2650 }
2651 
2652 void InformationCache::initializeInformationCache(const Function &CF,
2653                                                   FunctionInfo &FI) {
2654   // As we do not modify the function here we can remove the const
2655   // without breaking implicit assumptions. At the end of the day, we could
2656   // initialize the cache eagerly which would look the same to the users.
2657   Function &F = const_cast<Function &>(CF);
2658 
2659   // Walk all instructions to find interesting instructions that might be
2660   // queried by abstract attributes during their initialization or update.
2661   // This has to happen before we create attributes.
2662 
2663   DenseMap<const Value *, Optional<short>> AssumeUsesMap;
2664 
2665   // Add \p V to the assume uses map which tracks the number of uses outside of
2666   // "visited" assumes. If no outside uses are left the value is added to the
2667   // assume only use vector.
2668   auto AddToAssumeUsesMap = [&](const Value &V) -> void {
2669     SmallVector<const Instruction *> Worklist;
2670     if (auto *I = dyn_cast<Instruction>(&V))
2671       Worklist.push_back(I);
2672     while (!Worklist.empty()) {
2673       const Instruction *I = Worklist.pop_back_val();
2674       Optional<short> &NumUses = AssumeUsesMap[I];
2675       if (!NumUses.hasValue())
2676         NumUses = I->getNumUses();
2677       NumUses = NumUses.getValue() - /* this assume */ 1;
2678       if (NumUses.getValue() != 0)
2679         continue;
2680       AssumeOnlyValues.insert(I);
2681       for (const Value *Op : I->operands())
2682         if (auto *OpI = dyn_cast<Instruction>(Op))
2683           Worklist.push_back(OpI);
2684     }
2685   };
2686 
2687   for (Instruction &I : instructions(&F)) {
2688     bool IsInterestingOpcode = false;
2689 
2690     // To allow easy access to all instructions in a function with a given
2691     // opcode we store them in the InfoCache. As not all opcodes are interesting
2692     // to concrete attributes we only cache the ones that are as identified in
2693     // the following switch.
2694     // Note: There are no concrete attributes now so this is initially empty.
2695     switch (I.getOpcode()) {
2696     default:
2697       assert(!isa<CallBase>(&I) &&
2698              "New call base instruction type needs to be known in the "
2699              "Attributor.");
2700       break;
2701     case Instruction::Call:
2702       // Calls are interesting on their own, additionally:
2703       // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
2704       // For `must-tail` calls we remember the caller and callee.
2705       if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
2706         fillMapFromAssume(*Assume, KnowledgeMap);
2707         AddToAssumeUsesMap(*Assume->getArgOperand(0));
2708       } else if (cast<CallInst>(I).isMustTailCall()) {
2709         FI.ContainsMustTailCall = true;
2710         if (const Function *Callee = cast<CallInst>(I).getCalledFunction())
2711           getFunctionInfo(*Callee).CalledViaMustTail = true;
2712       }
2713       LLVM_FALLTHROUGH;
2714     case Instruction::CallBr:
2715     case Instruction::Invoke:
2716     case Instruction::CleanupRet:
2717     case Instruction::CatchSwitch:
2718     case Instruction::AtomicRMW:
2719     case Instruction::AtomicCmpXchg:
2720     case Instruction::Br:
2721     case Instruction::Resume:
2722     case Instruction::Ret:
2723     case Instruction::Load:
2724       // The alignment of a pointer is interesting for loads.
2725     case Instruction::Store:
2726       // The alignment of a pointer is interesting for stores.
2727     case Instruction::Alloca:
2728     case Instruction::AddrSpaceCast:
2729       IsInterestingOpcode = true;
2730     }
2731     if (IsInterestingOpcode) {
2732       auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
2733       if (!Insts)
2734         Insts = new (Allocator) InstructionVectorTy();
2735       Insts->push_back(&I);
2736     }
2737     if (I.mayReadOrWriteMemory())
2738       FI.RWInsts.push_back(&I);
2739   }
2740 
2741   if (F.hasFnAttribute(Attribute::AlwaysInline) &&
2742       isInlineViable(F).isSuccess())
2743     InlineableFunctions.insert(&F);
2744 }
2745 
2746 AAResults *InformationCache::getAAResultsForFunction(const Function &F) {
2747   return AG.getAnalysis<AAManager>(F);
2748 }
2749 
2750 InformationCache::FunctionInfo::~FunctionInfo() {
2751   // The instruction vectors are allocated using a BumpPtrAllocator; we need to
2752   // manually destroy them.
2753   for (auto &It : OpcodeInstMap)
2754     It.getSecond()->~InstructionVectorTy();
2755 }
2756 
2757 void Attributor::recordDependence(const AbstractAttribute &FromAA,
2758                                   const AbstractAttribute &ToAA,
2759                                   DepClassTy DepClass) {
2760   if (DepClass == DepClassTy::NONE)
2761     return;
2762   // If we are outside of an update, thus before the actual fixpoint iteration
2763   // started (= when we create AAs), we do not track dependences because we will
2764   // put all AAs into the initial worklist anyway.
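  // A dependence recorded here reads as: ToAA used (assumed) information of
  // FromAA; should FromAA change later, ToAA is put back on the worklist so it
  // can re-derive its state (see rememberDependences and runTillFixpoint).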
2765 if (DependenceStack.empty()) 2766 return; 2767 if (FromAA.getState().isAtFixpoint()) 2768 return; 2769 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass}); 2770 } 2771 2772 void Attributor::rememberDependences() { 2773 assert(!DependenceStack.empty() && "No dependences to remember!"); 2774 2775 for (DepInfo &DI : *DependenceStack.back()) { 2776 assert((DI.DepClass == DepClassTy::REQUIRED || 2777 DI.DepClass == DepClassTy::OPTIONAL) && 2778 "Expected required or optional dependence (1 bit)!"); 2779 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps; 2780 DepAAs.push_back(AbstractAttribute::DepTy( 2781 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass))); 2782 } 2783 } 2784 2785 void Attributor::identifyDefaultAbstractAttributes(Function &F) { 2786 if (!VisitedFunctions.insert(&F).second) 2787 return; 2788 if (F.isDeclaration()) 2789 return; 2790 2791 // In non-module runs we need to look at the call sites of a function to 2792 // determine if it is part of a must-tail call edge. This will influence what 2793 // attributes we can derive. 2794 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F); 2795 if (!isModulePass() && !FI.CalledViaMustTail) { 2796 for (const Use &U : F.uses()) 2797 if (const auto *CB = dyn_cast<CallBase>(U.getUser())) 2798 if (CB->isCallee(&U) && CB->isMustTailCall()) 2799 FI.CalledViaMustTail = true; 2800 } 2801 2802 IRPosition FPos = IRPosition::function(F); 2803 2804 // Check for dead BasicBlocks in every function. 2805 // We need dead instruction detection because we do not want to deal with 2806 // broken IR in which SSA rules do not apply. 2807 getOrCreateAAFor<AAIsDead>(FPos); 2808 2809 // Every function might be "will-return". 2810 getOrCreateAAFor<AAWillReturn>(FPos); 2811 2812 // Every function might contain instructions that cause "undefined behavior". 2813 getOrCreateAAFor<AAUndefinedBehavior>(FPos); 2814 2815 // Every function can be nounwind. 2816 getOrCreateAAFor<AANoUnwind>(FPos); 2817 2818 // Every function might be marked "nosync" 2819 getOrCreateAAFor<AANoSync>(FPos); 2820 2821 // Every function might be "no-free". 2822 getOrCreateAAFor<AANoFree>(FPos); 2823 2824 // Every function might be "no-return". 2825 getOrCreateAAFor<AANoReturn>(FPos); 2826 2827 // Every function might be "no-recurse". 2828 getOrCreateAAFor<AANoRecurse>(FPos); 2829 2830 // Every function might be "readnone/readonly/writeonly/...". 2831 getOrCreateAAFor<AAMemoryBehavior>(FPos); 2832 2833 // Every function can be "readnone/argmemonly/inaccessiblememonly/...". 2834 getOrCreateAAFor<AAMemoryLocation>(FPos); 2835 2836 // Every function can track active assumptions. 2837 getOrCreateAAFor<AAAssumptionInfo>(FPos); 2838 2839 // Every function might be applicable for Heap-To-Stack conversion. 2840 if (EnableHeapToStack) 2841 getOrCreateAAFor<AAHeapToStack>(FPos); 2842 2843 // Return attributes are only appropriate if the return type is non void. 2844 Type *ReturnType = F.getReturnType(); 2845 if (!ReturnType->isVoidTy()) { 2846 // Argument attribute "returned" --- Create only one per function even 2847 // though it is an argument attribute. 2848 getOrCreateAAFor<AAReturnedValues>(FPos); 2849 2850 IRPosition RetPos = IRPosition::returned(F); 2851 2852 // Every returned value might be dead. 2853 getOrCreateAAFor<AAIsDead>(RetPos); 2854 2855 // Every function might be simplified. 2856 getOrCreateAAFor<AAValueSimplify>(RetPos); 2857 2858 // Every returned value might be marked noundef. 
2859 getOrCreateAAFor<AANoUndef>(RetPos); 2860 2861 if (ReturnType->isPointerTy()) { 2862 2863 // Every function with pointer return type might be marked align. 2864 getOrCreateAAFor<AAAlign>(RetPos); 2865 2866 // Every function with pointer return type might be marked nonnull. 2867 getOrCreateAAFor<AANonNull>(RetPos); 2868 2869 // Every function with pointer return type might be marked noalias. 2870 getOrCreateAAFor<AANoAlias>(RetPos); 2871 2872 // Every function with pointer return type might be marked 2873 // dereferenceable. 2874 getOrCreateAAFor<AADereferenceable>(RetPos); 2875 } 2876 } 2877 2878 for (Argument &Arg : F.args()) { 2879 IRPosition ArgPos = IRPosition::argument(Arg); 2880 2881 // Every argument might be simplified. We have to go through the Attributor 2882 // interface though as outside AAs can register custom simplification 2883 // callbacks. 2884 bool UsedAssumedInformation = false; 2885 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation); 2886 2887 // Every argument might be dead. 2888 getOrCreateAAFor<AAIsDead>(ArgPos); 2889 2890 // Every argument might be marked noundef. 2891 getOrCreateAAFor<AANoUndef>(ArgPos); 2892 2893 if (Arg.getType()->isPointerTy()) { 2894 // Every argument with pointer type might be marked nonnull. 2895 getOrCreateAAFor<AANonNull>(ArgPos); 2896 2897 // Every argument with pointer type might be marked noalias. 2898 getOrCreateAAFor<AANoAlias>(ArgPos); 2899 2900 // Every argument with pointer type might be marked dereferenceable. 2901 getOrCreateAAFor<AADereferenceable>(ArgPos); 2902 2903 // Every argument with pointer type might be marked align. 2904 getOrCreateAAFor<AAAlign>(ArgPos); 2905 2906 // Every argument with pointer type might be marked nocapture. 2907 getOrCreateAAFor<AANoCapture>(ArgPos); 2908 2909 // Every argument with pointer type might be marked 2910 // "readnone/readonly/writeonly/..." 2911 getOrCreateAAFor<AAMemoryBehavior>(ArgPos); 2912 2913 // Every argument with pointer type might be marked nofree. 2914 getOrCreateAAFor<AANoFree>(ArgPos); 2915 2916 // Every argument with pointer type might be privatizable (or promotable) 2917 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos); 2918 } 2919 } 2920 2921 auto CallSitePred = [&](Instruction &I) -> bool { 2922 auto &CB = cast<CallBase>(I); 2923 IRPosition CBInstPos = IRPosition::inst(CB); 2924 IRPosition CBFnPos = IRPosition::callsite_function(CB); 2925 2926 // Call sites might be dead if they do not have side effects and no live 2927 // users. The return value might be dead if there are no live users. 2928 getOrCreateAAFor<AAIsDead>(CBInstPos); 2929 2930 Function *Callee = CB.getCalledFunction(); 2931 // TODO: Even if the callee is not known now we might be able to simplify 2932 // the call/callee. 2933 if (!Callee) 2934 return true; 2935 2936 // Every call site can track active assumptions. 2937 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos); 2938 2939 // Skip declarations except if annotations on their call sites were 2940 // explicitly requested. 2941 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() && 2942 !Callee->hasMetadata(LLVMContext::MD_callback)) 2943 return true; 2944 2945 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) { 2946 2947 IRPosition CBRetPos = IRPosition::callsite_returned(CB); 2948 getOrCreateAAFor<AAValueSimplify>(CBRetPos); 2949 } 2950 2951 for (int I = 0, E = CB.arg_size(); I < E; ++I) { 2952 2953 IRPosition CBArgPos = IRPosition::callsite_argument(CB, I); 2954 2955 // Every call site argument might be dead. 
2956 getOrCreateAAFor<AAIsDead>(CBArgPos); 2957 2958 // Call site argument might be simplified. We have to go through the 2959 // Attributor interface though as outside AAs can register custom 2960 // simplification callbacks. 2961 bool UsedAssumedInformation = false; 2962 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation); 2963 2964 // Every call site argument might be marked "noundef". 2965 getOrCreateAAFor<AANoUndef>(CBArgPos); 2966 2967 if (!CB.getArgOperand(I)->getType()->isPointerTy()) 2968 continue; 2969 2970 // Call site argument attribute "non-null". 2971 getOrCreateAAFor<AANonNull>(CBArgPos); 2972 2973 // Call site argument attribute "nocapture". 2974 getOrCreateAAFor<AANoCapture>(CBArgPos); 2975 2976 // Call site argument attribute "no-alias". 2977 getOrCreateAAFor<AANoAlias>(CBArgPos); 2978 2979 // Call site argument attribute "dereferenceable". 2980 getOrCreateAAFor<AADereferenceable>(CBArgPos); 2981 2982 // Call site argument attribute "align". 2983 getOrCreateAAFor<AAAlign>(CBArgPos); 2984 2985 // Call site argument attribute 2986 // "readnone/readonly/writeonly/..." 2987 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos); 2988 2989 // Call site argument attribute "nofree". 2990 getOrCreateAAFor<AANoFree>(CBArgPos); 2991 } 2992 return true; 2993 }; 2994 2995 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F); 2996 bool Success; 2997 bool UsedAssumedInformation = false; 2998 Success = checkForAllInstructionsImpl( 2999 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr, 3000 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr, 3001 (unsigned)Instruction::Call}, 3002 UsedAssumedInformation); 3003 (void)Success; 3004 assert(Success && "Expected the check call to be successful!"); 3005 3006 auto LoadStorePred = [&](Instruction &I) -> bool { 3007 if (isa<LoadInst>(I)) { 3008 getOrCreateAAFor<AAAlign>( 3009 IRPosition::value(*cast<LoadInst>(I).getPointerOperand())); 3010 if (SimplifyAllLoads) 3011 getOrCreateAAFor<AAValueSimplify>(IRPosition::value(I)); 3012 } else 3013 getOrCreateAAFor<AAAlign>( 3014 IRPosition::value(*cast<StoreInst>(I).getPointerOperand())); 3015 return true; 3016 }; 3017 Success = checkForAllInstructionsImpl( 3018 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr, 3019 {(unsigned)Instruction::Load, (unsigned)Instruction::Store}, 3020 UsedAssumedInformation); 3021 (void)Success; 3022 assert(Success && "Expected the check call to be successful!"); 3023 } 3024 3025 /// Helpers to ease debugging through output streams and print calls. 3026 /// 3027 ///{ 3028 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) { 3029 return OS << (S == ChangeStatus::CHANGED ? 
"changed" : "unchanged"); 3030 } 3031 3032 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) { 3033 switch (AP) { 3034 case IRPosition::IRP_INVALID: 3035 return OS << "inv"; 3036 case IRPosition::IRP_FLOAT: 3037 return OS << "flt"; 3038 case IRPosition::IRP_RETURNED: 3039 return OS << "fn_ret"; 3040 case IRPosition::IRP_CALL_SITE_RETURNED: 3041 return OS << "cs_ret"; 3042 case IRPosition::IRP_FUNCTION: 3043 return OS << "fn"; 3044 case IRPosition::IRP_CALL_SITE: 3045 return OS << "cs"; 3046 case IRPosition::IRP_ARGUMENT: 3047 return OS << "arg"; 3048 case IRPosition::IRP_CALL_SITE_ARGUMENT: 3049 return OS << "cs_arg"; 3050 } 3051 llvm_unreachable("Unknown attribute position!"); 3052 } 3053 3054 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) { 3055 const Value &AV = Pos.getAssociatedValue(); 3056 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " [" 3057 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]"; 3058 3059 if (Pos.hasCallBaseContext()) 3060 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]"; 3061 return OS << "}"; 3062 } 3063 3064 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) { 3065 OS << "range-state(" << S.getBitWidth() << ")<"; 3066 S.getKnown().print(OS); 3067 OS << " / "; 3068 S.getAssumed().print(OS); 3069 OS << ">"; 3070 3071 return OS << static_cast<const AbstractState &>(S); 3072 } 3073 3074 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) { 3075 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : "")); 3076 } 3077 3078 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) { 3079 AA.print(OS); 3080 return OS; 3081 } 3082 3083 raw_ostream &llvm::operator<<(raw_ostream &OS, 3084 const PotentialConstantIntValuesState &S) { 3085 OS << "set-state(< {"; 3086 if (!S.isValidState()) 3087 OS << "full-set"; 3088 else { 3089 for (auto &it : S.getAssumedSet()) 3090 OS << it << ", "; 3091 if (S.undefIsContained()) 3092 OS << "undef "; 3093 } 3094 OS << "} >)"; 3095 3096 return OS; 3097 } 3098 3099 void AbstractAttribute::print(raw_ostream &OS) const { 3100 OS << "["; 3101 OS << getName(); 3102 OS << "] for CtxI "; 3103 3104 if (auto *I = getCtxI()) { 3105 OS << "'"; 3106 I->print(OS); 3107 OS << "'"; 3108 } else 3109 OS << "<<null inst>>"; 3110 3111 OS << " at position " << getIRPosition() << " with state " << getAsStr() 3112 << '\n'; 3113 } 3114 3115 void AbstractAttribute::printWithDeps(raw_ostream &OS) const { 3116 print(OS); 3117 3118 for (const auto &DepAA : Deps) { 3119 auto *AA = DepAA.getPointer(); 3120 OS << " updates "; 3121 AA->print(OS); 3122 } 3123 3124 OS << '\n'; 3125 } 3126 3127 raw_ostream &llvm::operator<<(raw_ostream &OS, 3128 const AAPointerInfo::Access &Acc) { 3129 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst(); 3130 if (Acc.getLocalInst() != Acc.getRemoteInst()) 3131 OS << " via " << *Acc.getLocalInst(); 3132 if (Acc.getContent().hasValue()) { 3133 if (*Acc.getContent()) 3134 OS << " [" << **Acc.getContent() << "]"; 3135 else 3136 OS << " [ <unknown> ]"; 3137 } 3138 return OS; 3139 } 3140 ///} 3141 3142 /// ---------------------------------------------------------------------------- 3143 /// Pass (Manager) Boilerplate 3144 /// ---------------------------------------------------------------------------- 3145 3146 static bool runAttributorOnFunctions(InformationCache &InfoCache, 3147 SetVector<Function *> &Functions, 3148 AnalysisGetter &AG, 3149 CallGraphUpdater &CGUpdater, 3150 
/// ----------------------------------------------------------------------------
/// Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------

static bool runAttributorOnFunctions(InformationCache &InfoCache,
                                     SetVector<Function *> &Functions,
                                     AnalysisGetter &AG,
                                     CallGraphUpdater &CGUpdater,
                                     bool DeleteFns, bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[Attributor] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << " - " << Fn->getName() << "\n";
  });

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = DeleteFns;
  Attributor A(Functions, InfoCache, AC);

  // Create shallow wrappers for all functions that are not IPO amendable.
  if (AllowShallowWrappers)
    for (Function *F : Functions)
      if (!A.isFunctionIPOAmendable(*F))
        Attributor::createShallowWrapper(*F);

  // Internalize non-exact functions.
  // TODO: For now we eagerly internalize functions without calculating the
  //       cost; we need a cost interface to determine whether internalizing
  //       a function is "beneficial".
  if (AllowDeepWrapper) {
    unsigned FunSize = Functions.size();
    for (unsigned u = 0; u < FunSize; u++) {
      Function *F = Functions[u];
      if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
          !GlobalValue::isInterposableLinkage(F->getLinkage())) {
        Function *NewF = Attributor::internalizeFunction(*F);
        assert(NewF && "Could not internalize function.");
        Functions.insert(NewF);

        // Update the call graph: replace the old function and reanalyze all
        // direct callers of the internalized copy.
        CGUpdater.replaceFunctionWith(*F, *NewF);
        for (const Use &U : NewF->uses())
          if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
            auto *CallerF = CB->getCaller();
            CGUpdater.reanalyzeFunction(*CallerF);
          }
      }
    }
  }
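  // Illustrative sketch (not part of the pass): internalization clones an
  // interposable definition into an exact, internal copy and redirects the
  // analyzed call sites to it. Roughly, in IR terms (names and the
  // ".internalized" suffix are illustrative, not a guaranteed interface):
  //
  // \code
  //   ;; Before:
  //   define linkonce_odr i32 @foo(i32 %a) { ... }
  //   ;; After Attributor::internalizeFunction(foo): the original remains for
  //   ;; external callers, while internal callers use the exact copy.
  //   define linkonce_odr i32 @foo(i32 %a) { ... }
  //   define internal i32 @foo.internalized(i32 %a) { ... }
  // \endcode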
  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand but if any use is not a
    // direct call or outside the current set of analyzed functions, we have
    // to do it eagerly.
    if (F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }

void AADepGraph::dumpGraph() {
  static std::atomic<int> CallTimes;
  std::string Prefix;

  if (!DepGraphDotFileNamePrefix.empty())
    Prefix = DepGraphDotFileNamePrefix;
  else
    Prefix = "dep_graph";
  std::string Filename =
      Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";

  outs() << "Dumping dependency graph to " << Filename << ".\n";

  std::error_code EC;

  raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
  if (!EC)
    llvm::WriteGraph(File, this);

  CallTimes++;
}

void AADepGraph::print() {
  for (auto DepAA : SyntheticRoot.Deps)
    cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
}

PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ true, /* IsModulePass */ true)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}
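// Illustrative sketch (not part of the pass): with the new pass manager the
// module pass above is reachable as `-passes=attributor` (and the CGSCC
// variant below as `-passes=attributor-cgscc`) from opt, or programmatically
// like this, assuming a Module `M` and a set-up ModuleAnalysisManager `MAM`:
//
// \code
//   ModulePassManager MPM;
//   MPM.addPass(AttributorPass());
//   MPM.run(M, MAM);
// \endcode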
PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
                                           CGSCCAnalysisManager &AM,
                                           LazyCallGraph &CG,
                                           CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ false,
                               /* IsModulePass */ false)) {
    // FIXME: Think about passes we will preserve and add them here.
    PreservedAnalyses PA;
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    return PA;
  }
  return PreservedAnalyses::all();
}

namespace llvm {

template <> struct GraphTraits<AADepGraphNode *> {
  using NodeRef = AADepGraphNode *;
  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
  using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;

  static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
  static NodeRef DepGetVal(DepTy &DT) { return DT.getPointer(); }

  using ChildIteratorType =
      mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;
  using ChildEdgeIteratorType = TinyPtrVector<DepTy>::iterator;

  static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }

  static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
};

template <>
struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }

  using nodes_iterator =
      mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;

  static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }

  static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
};

template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getNodeLabel(const AADepGraphNode *Node,
                                  const AADepGraph *DG) {
    std::string AAString;
    raw_string_ostream O(AAString);
    Node->print(O);
    return AAString;
  }
};

} // end namespace llvm
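// Illustrative sketch (not part of the pass): the GraphTraits
// specializations above are what let LLVM's generic graph utilities walk the
// dependency graph. Given a hypothetical `AADepGraph *DG`:
//
// \code
//   for (AADepGraphNode *N : nodes(DG))                 // nodes_begin/end
//     for (AADepGraphNode *Child : children<AADepGraphNode *>(N))
//       dbgs() << *Child;                               // child_begin/end
//   llvm::WriteGraph(outs(), DG); // DOTGraphTraits supplies the node labels
// \endcode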
namespace {

struct AttributorLegacyPass : public ModulePass {
  static char ID;

  AttributorLegacyPass() : ModulePass(ID) {
    initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;

    AnalysisGetter AG;
    SetVector<Function *> Functions;
    for (Function &F : M)
      Functions.insert(&F);

    CallGraphUpdater CGUpdater;
    BumpPtrAllocator Allocator;
    InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
    return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                                    /* DeleteFns */ true,
                                    /* IsModulePass */ true);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};

struct AttributorCGSCCLegacyPass : public CallGraphSCCPass {
  static char ID;

  AttributorCGSCCLegacyPass() : CallGraphSCCPass(ID) {
    initializeAttributorCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override {
    if (skipSCC(SCC))
      return false;

    SetVector<Function *> Functions;
    for (CallGraphNode *CGN : SCC)
      if (Function *Fn = CGN->getFunction())
        if (!Fn->isDeclaration())
          Functions.insert(Fn);

    if (Functions.empty())
      return false;

    AnalysisGetter AG;
    CallGraph &CG = const_cast<CallGraph &>(SCC.getCallGraph());
    CallGraphUpdater CGUpdater;
    CGUpdater.initialize(CG, SCC);
    Module &M = *Functions.back()->getParent();
    BumpPtrAllocator Allocator;
    InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
    return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                                    /* DeleteFns */ false,
                                    /* IsModulePass */ false);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
Pass *llvm::createAttributorCGSCCLegacyPass() {
  return new AttributorCGSCCLegacyPass();
}

char AttributorLegacyPass::ID = 0;
char AttributorCGSCCLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_BEGIN(AttributorCGSCCLegacyPass, "attributor-cgscc",
                      "Deduce and propagate attributes (CGSCC pass)", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(AttributorCGSCCLegacyPass, "attributor-cgscc",
                    "Deduce and propagate attributes (CGSCC pass)", false,
                    false)
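// Illustrative sketch (not part of the pass): under the legacy pass manager
// the passes are registered above as "attributor" and "attributor-cgscc" and
// can be scheduled via the create* entry points, assuming a Module `M` and
// the legacy PassManager header is included:
//
// \code
//   legacy::PassManager PM;
//   PM.add(createAttributorLegacyPass());
//   PM.run(M);
// \endcode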